[Binary artifact: this section is a raw dump of a POSIX tar archive (Zuul CI job output), not renderable as text. The only information recoverable from the tar member headers is the archive listing:

  var/home/core/zuul-output/                      directory, mode 0755, owner core:core
  var/home/core/zuul-output/logs/                 directory, mode 0755, owner core:core
  var/home/core/zuul-output/logs/kubelet.log.gz   regular file, mode 0644, owner core:core

The remainder of the section is the gzip-compressed body of kubelet.log.gz; it contains no recoverable plain text.]
ʓDLhFe$iOE2z2|$`T6@(vF][d_,x֭wTa55lЇ>Oݠu\RY>^ʨ-2%3<ƣ˵#j))TxnZ)< đ#/zDu&b<} comQj0 `4,ddҡAQ(Ф(geh[ؘAeLTJ:bjV-ee=ȹc:?"-QtۦR2L omƹI.n/EnLy-8zG/˶ H^=N.޹Cv*R]"HE\rЉU^B2h}#C|e&]Nk&uvZԖR=3$`wd8, z`}զrcv+qvAUğGy8]<5eg >ZSBB%"Ql yAdER0ދ\I(P[]IKYI1AB[mokY@Qh $^%C E<{FꎑrhoJpQ*HHf|DTJY`ȭ&UWakʘ;+]?&jըkuzRMgΪ1'<'շ'[M* ve/k˓fpFo},Yh$7Z mU#r][o9+Ùvx) Lv3`.ا,K^N98[;jIZkIn6Y$>VŪ~w8#C2}shM8 1EXn٥Sx]l|x}lMՒD 9N l$F:5xb_7~lW̾"9*flqm߻X-7$*woۏ7im ▮ۚap"[gQhC +|̣W?uvI8VYꤓmP̴q>͈ dG>W}706^3Z ㎅U*~Ow_zc'M~xކaI}29"U&8Fu÷7.|SEESE K*Sԯ ߻B7ɋwox.߿xFKY oMo ૦Uޤiaen|vM]niUs_*C>Oq]?*ᡪmO]>{!yJ>zM<ܼp#I CpBȠ3FdDvI'P)Pv}a.e%>Á%#*hB:Ie$)g@R j6(&ru:Pnt?vlw](.=tv]ZW㿤1>KTyGM5pˬ.PP:FxɍӎQDoMpMz6 ?'hy< .&zxMGT *hÝ pIt@kpҩIB (DqŜi80l:Ĩ5p:A 10ڱ9yu3 )N8it"hSABP+ <;%pE4 #P9Hz}.)ۼ/Ƅ2%x ,*DMy4pF RH>[>ު҂HJ;Ou>I94Rh@넏[ddX 6@uFtTn,W'?]tRJR I $jq1!'x¥r$c)cXipTMnv tF'Oۄ${%pN >sZyMrn"ܑ-Fuw!Zգخ jqJdIIBIZrv$ :2LZv9:]D*AkcOKrjzx6HOwPE ?Aq?5(~\~xmsp5~zey\obwl{¶μ9u$Y8t3kPs[WDХҥRIbRЙY"EYBߢB) M Wo^dțWv]¡ᣟpt_(W"Fq+r%!8v2[j'vTœdE(#e\@ ؎%lDK>heƕ 1)<9K\{ɭ%mԧ!-S ĩv!ă2L,h5T"=&b:5o%.r_ȥ.7W&ow_j7mNܰvr4t Rx2!^Q @dIVIbry"/7Tjn^0H9׭rF;AZR0IC21)Zx<'RN oA28]b$9rv>pڽ1Qc52QnB{2ad(ʄ)m5i/ל8OW7X22\I* Y4"2&Ƥ8y ֳ+Kǽ$LC1"J;qŻ./=_OE~OTw2]laFTRY?JdfFv]9C"e`T< 5rOP0TS$bƟ.($JB &>ԅ^n=xF{4Rgܥi:Y` vDHG4ǟ?h>CQ 5PDpJ9E4^`YQ^Wmf۳Pu'&'bfV܎(&-!mkh~7e#OVUkͻ5 x-G_? NQyI6R/MXduAZ&Kt2]MHR9O!qz^ q,tJ`U Jn)`T9kddY ;MPvpX8(.;ax/+1cD\'!(fRh8&10L>&;#g5b ڝqdž [/DE=i!Dٟj˓7֘P` lD;ÄvN h!fHh# ED1 `&Gv+~ⱚ`+ELjOxBe2\XcIK$ě5uU#f:'mH)!RQ&IEyPRGeTGcU|k1"vFjDl9Gӻ%',9h1\oB1+6^WHrO(#zq$XGn ܞ\崔v*7 >|GAmQUz ;6p;z8q(68yZr " I"C iڹI6F/$I 0DnwwҵgrHvHiƕ*UY;mU7Uwψ\JxbBYZ^uJϐ ߧgs%Kup+*YnF{nhVbKNQ֯v~Uʛk:<l*_Qqsgc[,z$5'ݟ]7{K&s|)V^Lr;%3z.C G ,;SejAz*LPT`/0+zDpGW\%ZA2• :"BWGW\q4UVC+r>y ^\vLp 6h  \ej9;tT voYo9#ػ14 ' WOVH܊up%pU=FS/JPu=o )nֽyN4dEoc9$eNJ kuľU2)avnE);IM -ڒBB+V!4fmG;催a$U|i!vimMrzE5"zҺXt):Q} ]&|J'{X@䦤1KuXe](zQ=5 }m.5ˎaJ$zq@nj ָh)4"^PTl@t[ x4shQs#`BI dN@yU벫ԔY@h4yֲtX\Յ1cd4LCȽ Q)X\I1?턼[-V-t*x4XYi)̨&4: 88HVG!H0(k@oB&uT_@e|=cYb2j2in vEm%bxeY j3xWh%WKo@Ơ) uڀb(4(,@6{$؍tt)JQC(]e֜hr#nC3!vD )_J&:&|K@Z\AN[:*]:∬ @M 4͙@(qHseԸQcj Lڲ^c5ͪ'E]%I/[ܸQ4"/z $dCKYF!Bi5 UV+ 1^ B5Vczhh;4d36>/6n[1/EF̺(NcB*&6OkpD9!Vڧ=!>dU4öjEo֫^s-*ՂG{{x2AAI|4xK!#/ፍ@Ӗ4dQt4ҕjI#J2a1'8;ǍLj,(t_8g@Z PE"LԴjPyUA>xmm;A"[7xѴy{KH'*@Pã@܎m}G V'ES U_~^j(<m`^֊N"cpS٫곸 d}E XVGCa8`DP(c ޣP.O9g^Ls@ Q/сwK }CGzv@ @z RpUf}J`@;뒄 Xfl~%dl%VMSsAn+itҖEs64IxFDj3+j4VEC,>j,e4HXem WM-J M ڪmryX~=yyk9\:;ȦirѶ93IV[(42hfU:9lѓХE0I0/4Nd2^SۆfME\k Q^y(Y=y`4vMfcN`iB_HrR{R@c8nJxKT]mM1WTs\@7""U[h' JTEJԃ ( hTU")RC<`뛶] zG]Q("N>)ڥOn Q"gO:|Wt&ePhQLj0R܌"2!0xPuJc-и'`ҕȪT~M6lӺ r0Hf -8֤U 6)j@%HhΤypR 9'k-Dv *UaLM4fH֫YUqڂi#Ǎ>N`  UNWP ̀zreR4/$ Lčr(KD9)Th5]A=u@e(64qEOt x@$\0 Ǎ6&TS.*w3 H&"ˡꘅG@VRpIFi)4%UZ4V:~'ЫTY~V@b%+X J V@b%+X J V@b%+X J V@b%+X J V@b%+X J V@b%+q@4>WJ Pr@V|%@Q 䕔@b%+X J V@b%+X J V@b%+X J V@b%+X J V@b%+X J V@b%+X Jף|i=XݣKz7킖S͂Kku^w?+˫W@p G?%dAFk% ,\z ¥;]O3:Q"}؛tEF/ _z"+,_cmQQF{\%]ֽtEV:A7d(]M2A!dzd4L_cZƝpm@[nvc (o\yN+Gwa6CjL#p3{?.S+ZK55QH#TE)ͷ*k'}iَn7nׯ~˫i;hy٘^{?efnmo&M"<]tQٷpCYJup6i ӓ~!Ã_qW{xlRVC`5try![>U*fFrbWx}va~;:"{׵K 8`j@!{;'ntryc#\1;v胟" >Ӟz~;8]?×>541p}32&?|ۥV; &wxu<Ԛw+a7NUj;SAT5.}?yMtqWY>{`<%3*]|%Kx`/m6h5 =Mdϰ-v^<n`rq2_"V#J2f&Fp^#ISK6xHn@6}_@XAw/@AtSqw mϽ.n@|K/}P/N>nQX ݺYtuy>gS6S᪕v:=4 'JnK's22{=]\Lh ZEX.'Viy)IZףI^DsRiӊ:inmqb #x2s{q ce_76d†eQHrRno>ְ߹&x";h8 {;|Im`ph>+Y~/f邑{4 5{3K0X}%k|sd8׋M>v{;[=6C,~zEP^V{}A!!_гBȪ$]ǭ|E=_Yۙo201>R ZKbkd`Yu%f.Ҏ Iuv]J2{%E]]Um-F9)۟b dѫӡ/M #r w{ɧS*6]Ivk]R))B$L)is܃"x -|`}7ibvSU# J'J*j(E5v?4=%=ْ)6Le7^ބԕ'M^L9VBViilC}hM |O>KeJꢧ*/1ui/X'wo-rT޷^Nj ;hr8<)#g>D~oGT{UopDEtٛ:;Nfw:k;$†a,_baI f~GGsq6ժ?VHg47Ǯ'Z2ގLES/‡vx5v;)m%/{m.SmuBIypmms! 
^~4QF ʓ-٣)u)l1GFѳ)n=tP/#6w-͍H6#=|YD8 nNS,nG%!Q YjUf D~}iUbh2ѤG?Vڮ&\"V]q ̆G~Mèha7Y~a<`]z_Lߖ =st>pZQ>!7YoGVl^ȼdogJ_좜Ux'*.N۵7w7t Dj_aBMe Ks1RՆ]\ -fYnB[NGgit/ ~|~ÿ?}_>:i <M_6 <I-CkOڶ|ł?e\lqԼ{tN3ꈙf~0w&4&I ]/G+a+I>1X@!z*lIXGBJX[ G6,ѥϟ}8!̋5aU_GQKB(F֡C.?i@:hҶҘrpĩ3_._}7=lO34'Vs"L:{'yGyc惇qkxnypK(~q3wܴhb֠ KICJNcQIS̼W "M{F4mK]57*`mĬt޹BruH5.zbmHh6oAy;-իˋ(}izF]{l* 4@|rң"]O&c & RAAjIRjKƨZQ[+A`^HRdsuJ׮gd)(0޺ŋgGjB;1g\ q<==|qYd!aXҔ #T[9aw;]幷w='ְ[+zc`<9P{JzFg|0u|. jP: wt1E NK5zAV-~}r^d |񓵯]TPHT`ɀJJ9>Ojj2fGY فW)"4%H[Bc7 M-gUL{V.c/+X\p" &Qb͐):(X(ւo&,5oZps }jAf?ZJvMYf7'$ ^xq["}9["Wr%Cd1餃DAДjV㬟!)4I7ͳf`πq46/Bw>Fj%k%cλ@(!@ ^OwOT^?JeR 1m5iYcm)E݉)G}->y|YF&a>-ٓ oF\q1m6 p;|w}~j+sߊvυn 握~J ҊI}sWJeIbz"Q8Xb^Q6`a~>wT3@rܢO32YA:]^QA0}Ab#)^ONzǫr9tr}m"iluRu[O쮾 1=E|:)Oήz~egBaUa+RƁoyZaajGc^G@NDR\~*-8,ZzBv#~Ҫ5edY۷@x|Ha;01-Q"I 9|;uhSTOL*0b Fbku/,}b5W6*PJ;#rq`u>%FCBmAk*ϼvh -  /h@oW]3\HFdY<1BA8В$UY;`U.0B msi_ۂIқ~zˠHwEkǷO!_L:|l]qC#DCy ?S c}Q:{d~]Nb9vT}VF U}X*WŎVm }l䐅Be**$kxk4!:>C d)b ZHʷswᕩ(5;dmTS0 Ѽ|* m_M:]jp:uoV?]YyLϗ[t Q:d;p2uhrQ0Z5^f'tzMҢȝBSfُW‚ك?tFʷz59e֙eG/1g ubD *]d9hua @[Wd4߭$G.ӛmT>+{%ԉ6&!NAJ`:E=+le16!`QҲPL堒L"ˍShAh(:'|jl:E4`wGhjճ/UК}Up8շYfz(YТ՟o{ oޓ3 gQy-wVpiVm"FZMQyw(!S2J.!9.Z)H!+_u7OEKГS-Z`T2R Ԇ PZ#cF,$Xlg싅-ggj-ƌm8[|iO7/._ǣO:2b'K01kj<sY!]Z9(5w9%,Yo)].P@J˰E X`%"B/FfٍbB+e_Pq߽3~# y$->0 mKqZPS6lM^RFk ktf1SY(UV>Y&Q f1$Rm:ɼ16n<) AVc_D4Q8 CmQK[@6&ҡXJ^$h!qh=R$H(m\/X7b@3b)l٧b$$.j:qW o܀%ZW;P}88C6Wu󒕐'DL ZC宮&7AL\K|gXUt]ΰJ!&nI.Qً9QHտV,D)+fPX,XB(HCP*wmmgqm^7ydw63؇=k,iv&ί?V˖mݒZqMjEvWboֳ#U Yl'c{j3z]t otsAȕjMLW|39bt7t6J_;ͮg-?<^-=Z&ъf߮/pbothy֍ig.'~|+zjΆ?{tusd}! o%tbW[g67RrBnR̭,rՔmr$Fwo _==#pu|J`vdN\y$Iqj+)Q+\v}0O(ᔘ,.3WYZ!W(%\bD&N,. \ei5o;\^!\q1'W(0ғ,-m,^!\ZVF\ŝU \eq?Bi)m,+A T \eqUUdW8sչ\ȏB*UE\F1a3XGą�7L 8|9ψ@ Θ(HVzq#nojR^iއxx; g (j{_̟0>j/X {&E6g#seF! šʻ +?c_}eBHz=ږdNB ι+YRJ<P?hirV\X x^Nc_ɛ&^:u@{e%E(ךY +^O9/~ lszmH)>ӋA&w6Qr"RHxb 'Am9Cһ(vBI!j4Z*y*ֻ;g~rL  L:;BikMR^!\i FOB \ei5k;\^!\I,9Bq9U؆\a1[ on'.; #]Nʶl b\)Ґ+X ~2p=hBU@ ߱:z=pŸ!\e8Bq~9ҲUr}+8y,z&p7FsЫ~Sę,ӏEsT :|EVyuΏO?Uӛ_[|ȊRxRY ~J6?ځWI_&5s++"@*պ pQIET.&S]:zQ>+-7T3$K0 d@ d4JLWZPTKkM'6DB-MEew]>ipTȬӮꍉ>S@jSvucv WigI\oT:^K,2u$yK]4o3|n]ZUi}ٖi}`GdhWRv*vtvi^nGg)hG ܛ1h9_ie׀-tD--/g0Ft^d˱Crж;-^@Jo#:|`;B@hNo!A&+FNp4[*H\@D .hf 9H(IaMm&G9Ka[1qr\E6\;TK1hq42{p~Rqg_+opq&*u"ҁu V(4ոzIHV{exY=TCWq/VG|PG,Sf1GrH3_hoUM֞8)`0W X5A $Fli^%pֱ$tVMgת '^JEѠ N3@Zr@8%4Jq.,O61 LxAi|b)͔DSĨ1q(&Ɛi5sAG^%Ձϻ8ߒ;ҡM>G>KS0,ϫ2$n3l"jN)8F$"AGuP͑m\`x;5W;MRmo:-sǒy @OP[rJD҅`8Ef,Jͣ5c@DBeQ*|zhu#\wPze{N>:y DռwmeXC\;rZL MEi0 QV*}-JNl:}DUvXmj{J?2 J8u9-*tDgc4V,("cE &$Cy>v8*hLR8:6J0ǃЦGop`2Qf$EOu mm"aݘ8 wEg}1UU87ƹgcw|s+ď?ϑGgPm =?p*KQ2e*83&_LTZg %70fw0h}8TYk5#& P"dI `[Ju…ĹCzC2\-c@A̒|0/%z~Frs[XLD";8L4"OJ^b$׈B$))J-F"M:yi 0ĂꐧMnB؇jtqFVD]71*DV!8$i߂[;iy2_~iS X: X\ȈҸ" p8TϔNI!X &VP[]㋾ZŒ5)h8d2NHQӖ]߭󿅐 &@)`!iWMZ'Π[ޕ7:/~o=Ԃ9]Bz,gH[:>6,״tIK=EOVLPzϑ1|&lxs@`9U]VMd0w+:êuY28GGM_/OџxwN5 u,1K  Z%:.H)xDhsx"I2:'_38FtZ Pi˫x-|>[7H [U'|뉥y=:-/&)m>@nSz``/S$k )Xhc5kCP!מ.xg0&ZM""-PwJ.5J\sT:vƃ$8A,52^ *TG#E$5.--Xi#1P1q6p;H%us=Q/_v f5i}L`rHLWWAPA竳%.-U9^JZv+ws , S,]NZIH쓖'YwT\aϝvffĬ-w@iWCZE8Z"*̀x(51.1m"B3E(c$pP8q|jz"F24C4̶gËz"MXϳ$}_ZplD̾f㋻W뛞-y-K{F!FhlT&X*&c@Vr)x=Z˩|]#R|Nn˜ >\ϔL)\0)D qWD"rN+v=V iv8bD#Bdi %"5ӌ[z\ 4@xƍ sYNw~i)MVxk})9̖ev,LQ7[K-Dg#1 剪-;`_f Û,>2!ҝw@zzmof}q?*$0sN3SgCcL1\M~h-W}cc>~7}<񿕫o~x{{sUn=9*~Ӡj:<휓7  >~rq ̒"K{eݰp"v3,pц8U3g1n6C͊ih^Ylz]6VUd{-iFܱ@hsӷeR/9=caaUj_oqwX2=AH & &*T*J]1TtpA~~_˷?\b凟>|/7VuL_,?_&~Ϻ_7XS]cMMM5MyI{/ס>惘G*! 
U|I6Q(J1.ܼr#I H!8!d C2@S"DaE;$Ǔ FQ(@;o 3t)%#*hBUIe$k)A+F ʼ\u:v@c:&n^rХ奡KOC.~HK 1>~Nq'rwmH~t@c`g`'8$tV}A%Ye2eIi$\x_>V5oN_ .fp2ɜT/p+ss8C?]N?K`zz^ol, '/ؿI ;<)b&է bNlUgz6$}׌rr:'"I)ϙ`af C.g8[έF3۰v7u:u~hv=7~8`<;\}s{_A-zƎL݀G;{cKQǓ/u!u1zu߯8ވN\`J@>.z^oL֍B"x<@Y)>jS@)`U42z>єsBC92)Gm&zz5h|9zut#]'iii)neWr^Mn< Yd }&5Yš'VDp1V[bjD|nz;a"Cmc;m%KبUlX-r`HB`% 31)ӔPE P$Tu\@H9 )RydH$\EƄfl4Đ >0:8y;2aH2Wyuozݔl8V/W="ʔ cTZ CaU$Uh]Qm}%krpK~*.Tutv{p[dzo}=ʀk~%}T?L.KIGSRxsңޕ6OhAaF<ȴr1>3A1HC+SuQ].JF܎</Φn:gc)n57 6c~nGu(]ʣ63Ά-.:s"Y"- IC:GN>{l|8LIM[ʐD㾤 BB h,.A+ EP 9Y+ f֫ZCVSֲb,LAs(M.56g泚N'u^ -rm6NLW ժm󰏮&g[V(â۟obTUwW$|&ִT.C{EDz*1Jg%dq>P†eHyN##&AZ5tl!*1 O HŦ''T|T2R0LFRj TkL =Rb mmik App)Q}ϕ|4?9#Oy2>MWΟ_Ѕ|z-v4{ ReX"H bA S% b Ĩl6X/$<EMId]cL;^sy(Vj/(ͬ֔j8щ HYyK$/uhLj,aYArdhj !E%P"{ 2Jr0p̵`,Db24YogN5SƃǶQ7[">Z>Y: y`b1)Pr"g7W~@%Qbi;)Ⱦqc%EUhKN`%&SYoWh%+Qv=5>ʟfjQ#iA$؄c"8uDX`8^׊c)WxXJ-e_ 3{'Ӏ[ I V]vIPR(z oO==pm%KtAQHP,HG;|Q_bM?VXKbK"w@ݳ8OPva=}=e}Yՙ^Lk^›7nFGE^<+8ӍCozX 7>|;zn~swk]^<>ޛʎ^ݼǺo.^Y٠[W>>&w}ZF7ɘ߈/tG=ix&QW#eC/ݴ<ԊJW{KJ{YaynR➖Uۮ0_gNZ+JrtD, œ5ɣR,t+Jט6ޅU%~XG_]+6͐ Z]Nmqb2CYK C/a/; R qĽ ^ o#~ַ*_!f=9(v;3|5^Ä5u.:w|~__`dBNb)9*$H6evQ}zlWauaBA+pݲYQ$leM!`r. [),WuxUȪVtOο,g/-o_|&竏k6-EL&/JB& L)Hm2H(Id#0,|Y58%^J}!N_b\AZbXX;^Y Z#5eEW$x,HO1+zr?(1Y`hU\Gzy|dR) jk<"s[8U\c1WUZg\N'ޢrFGd\Mk\UqZuJLoޠ{8sja\Ui*%+;a:ws$7z5WOV ]=MJs` `7W[z'Q}sf? ן:#p:܊8yn 7փxNŮ]P٤BLu6t~-)4Gpt$b,l:]p&WvD@msJGNn(U+0p49q o49}@49݊&P*T)[1,2!;]"|XEh-YPLSޏ`s )m 9KZ8|M:J{+d,L {Kkz@ -HU%9OO.SSiӛm~lxMoBc4tJd)ZDI8"cc(dS9 e)!3:<8!HY8"G%"HT}n߸PH VφS'З0T6-Z,g,Pl+.eSM?c2_M.^i^Nih TZ+0cP2RzspȦd'tcc_۱Ȗ-DjyKp%Pԏ-|D>攲ɭy8qV9]tYNkvW4XdL1vEeUӪR.9fHAu_GNX#%Lo&/r WxfՐI`$Bv)bWbgog(-C]<3ʽq x7UuxpOnM3<>jO A zHzyoUp'DSkb*އBI!GvwOpzգMܭv=?ɑۓm5GZ_F E|(w=Yx;uIMeLThe/H9|ntAt[؎t[>j(BV\2(%詸<p5OYgNcRO4͡؞M08%9d[m_w'9z 5eʮ2ճNUdT1*F[%`F-DS%4D6ϭ/%04+(ba.JP+q3q+u ?O9v TwHʼ%j-LYޯ[!?{W؎d Y\PJ))U{33yB@c ^~ 622VDUQ*Y^eT*aU#f6o]nEno랸cf=ua:S|aos֞-%32m.lmm\.1Oy\ڎܮ69w?}I8;t*&۝]Isߎ΂Mr+vAšօx:4GtO6b'G!(4l> IcT*N#H)}rҨIEO<~ߓt>&1*@G\q\f>e2̎ :x|9/ϋ^:׹~FxC&J>J udfٰ%A ) fElU0NZiѧOoܳ%-ss<#ZTL1Kѣ*iǴ@B*%U(h*,d13 Eq1BPJ1c`RiE`NZAqB7|^/yԅˡ5}Nt̮z ou;T2ȕCī5Ȫ1܎.x{ۓ.{Bۘ:wf;bpyɾBCZ@$lBC)NUbXYԠB]S t 38uOl6`T{B;J^U\]@.=SB;;tR{GhrѨؿv$ U!-r}G4pє$-Y%jh!ST$ &Eo=ƨHguRm+k>qT}-3g8»/?ӌ_6 *BƜCpdkg># Vr&j3鍷NW8iȄfz7Ұ|`=Y@ ,|=>{y{Wgϸ~.y`/d5AgiЋd' x zVӳDbUҢB]-\ f $S΁DBy6;Hi R)>D=:WLA(EG oiFi g=0zvi-n3v+bc~GWn*aOQ ;p|6y!@nU`6{bwH e\m\PuĄ-4$@N_/uŸ "j7EK  b|̆TN( d-xCb@YEe+6]5lugn?!MS-! q$GnsաГwH-K2XRY3uޢ[c-MV;\ aHd\YR/吏L[1Zsr>E_QRqu"\(ǪwqzP0>aSun۫ɁY"Tي&KM%KF|^JFa6`MPI%x|2JBUN)v';QQR##Թ<ƽc{M.1T zqIk \dB%)"C*J(!4>}S*RKN\;}39-)WX_}jM|N6zR))V%TQlPU%Ԋ=MTTP d{܌.!{X<_H rBAdV$MIo*TSp$ )>zTWōԁ9qg@crAYKqXoE 6!JK1X0%oHfA78g'ƺ-p`hݠ~ fvag:R^jīO/L;7ZTliK,:%*[QE$1ky匑 #>ci\p[WCpIܽn;ʼn'4{Hdw[L5wRoo'"K4+Wm WٮWLz{?嵧S^ ~4G#Q~j~xOij`^J@LOC^Z5J6vO룠US2V+1el( a9h%^bDXCx=.r0\:wEFy,Y;x2b[Khv}ΈǸ"*4h,I)U2#J.*ɔsm $)ERh T'IIYjgy}:(ɉvlXyYexiTe˳hw=_Zy}rlgmv>[eU Xirbh6ڂQȔ R2uq$ʡNLeRdiV%"( (&Y lٳ!dtfA Wq2*=9mL 肱=eB䵄_$Iʣ>-@¬nh6D*n?c??kiĐ@:LLAa&@%Fy= z1 !mc1Q2-}'6@5Ȑ[D[-I72j9?!UjԹ:1Ǐw'94~P? :gk|4v1~'2uw~͏\a7yOj8 {@hZ/8 R;yE$kk\7]@ L YG[qznRu<^fc> ™mvR,Z<2>Π<0=?Ӊ'$JwvOnĉLZL^f%2>SDiܨ7&婕lM|k=|/0ޛD=M[<5!F$TOW1w> iq^*lD'Û?{W֍_fg(^!d6l8| EڂR>zoh%zOdVA$'aƞ@jS7bc7rZdm7˛51-ᨕO4e)>n=տ28acmu>Mn+EHs)+>ci$FW FSwFmV?{~Ob ˾`b%S@*rT=Gs4_M;^5RTqjکLNb1_HHՏ?T~?7? 
uz MtT=DGGEQko޵O׺gO7ҩy7x∙,f|:2pldx.VnlՏVî:7$٘m0H0(Kʡ'= s@L.(ѭ sҴK8\&T"[dI -q,&~Fqg AEtIΩSũsQ/3-;k\fbFڹХGycҽAХ5_:Od/YY/6|^)P&F @&& t`Ti& R?o"R,br)F](1MY'G tYIbU}eҩȌ4";gT;cRy0_uq<zl2Rb2 5DAiB1HN)N2@0nqz{37|oqO7u3ӆI)G޿] ƿ?l 5ɳnWuT6:*ZTr:*T :sƺ^&3#Ŧ|.(I 7Nd uAr[O9?̣p"eC4%_ TY0[ᒗ 0R/YJQ04)Z`XYwvzv4-c)^ŨO*hC.#j°1d X 3Q3`u `-xo: m.u,*F2T# ف/Y'9m]l#!D"Bp̢D4#RQKtj1q<)%^7smA<2-yR;bdeU'߿iHo bףͥ]W txCtRL9Dq攮n/ !Mq?N gfR@f|=t${I4 6(h+tU|!Bt҃VHXwȴc e^md^K>Vĸ'"$~9ԭ*  9cV|.ͺ#a6K;O}`+C,-Qӻ4{\DSzߦIn}ikpi^ݬ>*̸km+z0fu|O~T.wi ʦ D9F9J;Hu&H%,#C*˷Npwʠ)eh;8)U }(Lv#-taUqu(CY;Qmׁ{+`0~bk֙*, V7,@jiXYDq){&{O_}n"mKZ4j8oq=` 3G[9Va]TƀCIψ͕ӱJ-'Ih3te(.t@4.;rPK^Pã(Kt ae9V9 _>bp-j=cԁJ[bHUN#G-%IЌPfN֛@M85-uD2~ ,BF E$V^Q(*3q5rv3uͮ^^80p/6K;7BW6&jY.:UefgHr.t7jD{7C ϋMH{$gV`4LOpq]}._ eoЧѰQq+# 1d4H󡴵/5{U GPy /ޙ7RFئ/w5kfߋ1Wzճb@/?+.HSg++>cv`۱}dbo*B&peLº1 b΃Mh5aD1rZG[cWHĿ Ir&Q"D1^?{^h>ƻ2sx&2ERaa?'r1uBENV >:} Ȩ1-%N`v%Png2kJx"i?hH'imW4,7 P7`xlʢIYmpAN9&swӻDR:ħ)Pl+O?II'l&M2,w)a.F\f<4Aj?%!y~:,AY%4:%sJ90Y,HZz|x3moHFBm6v=y:[=R1,&!ZX'K8Љw.&b*xg'q! 3:ɪ0^ciȄ 9e"6)E6j$b̬ER+}eyXU緩`Xk*Ue(;Im;>Eγ !YcdR<(F8C@pVw3 nH6\ "F&##K\neX=r s].9P.."M';x"p]'`}mMn'7骕;KO±z }Gd}WB(_E`Mf.B|oR?[1` 1TAKLTPPGI}JQ (U8KNu' O;39 PpqifAcvGӳSx;"6-/\7]Z}o'ω ?6֝zyydGw~>mfComB7mzhݍrқx4ʕwcgLžw4M_y7g?y!T_I}Gc'' Hr#(j5WvFR,UO776y.&:%/'y'WYݗgOݳf؈>hQ}P`ω@NAxl^]EߝmoI^LwsAϾ|wkWyfUk x4~qtJ p 7VGhQF\ SIOUWfT^eq^e,TPHlat2>ye #%βL)pfPW^EKt b_l3ϲnVLj炃&0!R )ƣuhg)̑%ͅ$}5kQq,q 8yoS[rHdoUUVݭKW1E*"ٺ~!)Q2)Q҈ZJ,3z0O7QrǤsmYYT 03S2AHRkNrqj8 =AƬ մ-Ѳv p M;D6)$PӒB#?9]q\,p|rh]Ңs&[ jK\ș2GpAg L2sNp'}[ SKs>rRrZkJ%h-EHꯑІ@dc, H##EHJȢg%kNP}l&Taif.?;RVܵnJzBM/ҀPHx$, DI a-K,OLRZ6Q$yv2R<#D/"Ƹ(sYR**zW2F]EzLr;Dci,ȓQCI+$-,H.],ynF)7Now./|sWGwiZkRN H!C[õҋ ¢/zX9~muFT8Ѳ4M1{+9ƙC0|Wi$ 0V< aP9;8J.Lҙ~Q,x4u ^uϊEI⧅%M/Birq l^gnkk( \QӬ/fLBq5<8 JEc48&բpc#?oKw<* OII <~ߔ~sϿ7oSǯ:i @:x?=M>?n5B[]SMVm>|7׶k~iռ~p1l@Lpd=?h6^2ش뛲Zs_ [w6,n.::^D4J&:td)Ge^i(В[W&MB{:L+I[LZ.G\m(ع%bgvv#4J6h.2cwTѼ;&5D˥gi5uQ9m-)SUpzfPIF}^-q6<7A0xN+k2#K6$L%R$QEI&dCUYo )钦)!)"LjeARٓF<Q:'d zUe$S!1'BIy$hLΥpe ),#[قmR>Z0F&k9[.Qa"DLfB}H\Izۦ]fo ]{6t`[W^Π#w ٥`-U"fφZʓin( 1S#2Yd\J42iE I'?Y J1\@ TQ"b9;eԀ< Cmrh+wNy9e61x>i-xkKA(bVz&wA…[ԥd_ tGEU-^]0M/arN:\eoٸ]{v#v`,K9\i| | SDjh-OE`F09hڇ"Xm xCtB[b {&Z%G\c*Ĉ:eyV;`Z I8쐻-Ĺ&^!љG֔v}3^n6F m[E0oms Sn/1iad!ETG[=^m3Xhu,,WVw>d̟1WO[`-a෽}#F#[ΧeLj7qI5G&jID*-#jtyY=-0moVnh9}ePJDV3!Sd¤07ĝRBЊ!J%ζ[AHA|r),UFe疤 ?hEb綉QkOnglw#f=\iUwyB}er ^g;^{#c4SuGY2,gu67 ekdJ\2Y0p:Ti0bUiwB` 1:l3z2C'Hʈ,,H]@.@xGYaewlOR;w|k;$lM7GO1wD1-0}X)m_E/틴u}#Kۿd#-Q8fIF^ [U 1h &S;*Ym}p EJ,[S!_Ҥ}0=.G ͵ߑt}GVHnHdng8`s]1f!h!S.Wrvx<͖{-Pv%]l Π'7kƊd;8%2*<;9A2C>;UQȊ˙ۨ-cEe)SPs(lJ-Z/RIZ Rx(X˔#"*J/pE]"%> _=+{v S~zywqmu9XWhGaR\ѯނ~w_'ʝgCzۇYM^LOs߯H^)9kQըyP Ыy6!+ߺ+oȌ0^k8GYe0!{i.)-<>a,9X^>0>;Qk6t|qФ&6cjfѫQ3[Z*6mfhW?#/*neX.ehF8oFi7blVV_95cW73^ҳi"MǒԵ[֖N=[FZF nsz?{FJ#,( \fv3`2v7Hi"KINYwbaٖlYn۝QSd*X@녠3lVBrHa8R'-FeI rA\SYauh͛ ?xfE+sڨD0Rrc+gRֲK`bһvsJ+%!(|Bsh(X;&fDvn;[kܝRq _Su$O~I#ҷm͋vl2m>heGS̎v#^}pDQy!$@CZ:u$' -#ܻ \2QK2Pge2iFw |p+BjEѷu=msEK(02!eRmsmuem1K&$"i+wG@ %O[P-LuZ?5ޚO_p*^ >zL-mZ! [:,Um9dko@'獋{mͽ{T׺kէ:5An{=!}%iϟ~>vyo HyX0;.,()kXmjp A maI'DS"uiA$O[d:2}fe9ۨ'l2|T]СCg/>?76RO\t,D-!ia|0`6dյ\%cLRё6KDxPb H(됐Q$xv]$H +=g0KidSuZ5Z5BU#l`5e:9GØguhI l8/Hsp($(<$^#yƓ v }n^|uG0o ِH+ 5@d .,[IO+0#y ^qXmSDZ,9M)\Z R7,2@! cc0Zy\_Zֻ#L0Uhg{"^mn&-6xaE]Ѓz:+Zd.LY!*@,}2JC蜗NkAB+z1ޖvI)#ɴT%zDZW;).LҢM}>̦LS-w--wNF;fgmFLR Q- hu^"3j18o;m(`/nh 86]{ H! }bGi~F;q"D/" 0&RLM5Pk%g'RIGfX"NvbS}-YZ$b sS\K%Cm: drGEr[IEA>2:z#g;Ri=y)*YIB3vO7A羬 3mN1^OHM>}(vaeuMS*a^7~(gRJNa_sDdz e& :M) qɸͤVXczi%x8+;! 
q]!6Zm*mr:sBO ʬS"Z`#TRӳ%T[U+uz,t.٠X~OHhu֫27xQV S_g=~PЪ,j1D*)hmZd>&cgW;n/ZPŎԞjufU>%#< (IH{B΍0d!)\$ L9*d:DPp PlܓjLB9CZ@xFZFcJvl2qˤP25)MIL[DnCso<5іrGNΪ˝\1d82{27B́g-0dLFR^r/]}tk{>v^*VRcF'a9"XN"[gYCr8"yb*`@} ˏ͆*H7๙u8 IOc ,i҃>/p$B:9ogӋQwSM0<{Chur+yAZMq49.Lg]H2> ТL&pg=d VZzu"d4Z"if `kV+BܞMf6__G_Ҭ2G?,F# i?շ[M  Gao?BzRVu#735/DxC[?$wiPCFx6(6iܒ"P 2La-%O4 /3iųVԃ}d,ALulZn1ߗFRI\[_VVSc:.3E^.译BGGGt܁amEswjEC.15! nMtNTZ¨ym32Ej޼蟍7̓W糓ׁ bHr[σQOk+ڮGxy״V;L`$f7jHۆ[Lof}%47Vb9ǣrcN[&'煼6*.'mms"uZi#e`EI,]OS5UW㹙c?+At+ϯF`ғ,,Tph<9L袡שr[im>hb?|P~}?޾!޿߾yER` R>_F}7vD[Cxˡ]mz໌ڜq|}\:5/Oq^P+ӕَL 5wǮPߪ]Eґo\G1`0=`PJ59ϙ)COjAN]({cÓ¥n.:Jd>0 8)!26%H6N3(L^wTP]iNSN=pg{:A=f; ;&]#h(Lee*HRuQѺ Ng?Gs-\b胦}P7`Hl.^uv~/jY[l&z\]yQ];?Zo酷ujuLWY+=!tpOǭ!CnM[ f\EJ<5&)VǬ@x,9&Y:jd١"3҈qҸ@," )YYJU>-먭sxQ&0! <$&rQFí( Q% ){tS7)1bJI)0dsaK*D HQ69[7 uyelf =`Bfzf }VȍN`J*밣y-C.PCt&$2( S]y)yl)?37]]l o/B/_Rt^V64^ .Jr @iJv:$AUH.9;gXYːrd |$q[.EhT "avF܃ѹZhx^ZyGK{eHsEb ܐN -[Z#g0RC^۽%w!X>q`-+!g&鴽Ć[эmפ_p3#ŭÍ'ڸ* $2M(yY)|9~K S#H<832orh"FN1[`mt%K5`L[M2q2Uk32J8_V؞pScząk ANDOӑN[/IӁr{myK i-)כ<]oZܭ\AW oIц;h 񈖘Tv3$qfgt ?P4Бj)e;];.Bg["/5KVcZ}NQs~-Qύiwۣ.ݸWnK Eu}d?W4"V^_o'4Cy*uhIU3jWOt"`ji)H߽~FFO-xb߼WMfW`䶹˖{E 7Z;A{j^$G|^Ԭb ԎtNZz|A䜁 =QP"A1$J"fD(T9U`tTE&)(&Z .)l1zHBJ Jszy9'2F  O*dZz4O fhAbIQNk⍲VGαy1V R:ILJt Y R2H00% lG1) kwɧQr‹ Uؤ,,̲ f33bPeR.0!sHɐG F)j,z`TpTZ X rhH2Xa s{219ÅBr9ݎG"A CF8%8ZS+@Vo{Y@起Wס'4>8K^ړHŵrW_Ȇ o.QssFd(i'y+(Rdy|WQ̥sy[=AԁpL& %(hd2NrXF{.IVHw2&Idu ce(6 L"i?  RO|FQ&@D:RXҥJEŸGh#m>]>. 7zs;5%8'˷ߗm4ٚp8n%J?? >'eth$m,luX8ltR,Hk)}\jBHa-,:$F(@OkB2N[$YB @42#CR^ eY,'yP0MDnd1ata.FzvÛ0\ j6eQ{tB=j N!4,ycI#skt$+AdJƲ;- $CA,:+!SH 8FIA9VYx20 ""(GWas@ L:O&&Nhd@3R`9ǜNxREhTt(8#&IEXP3.8$(bQ{lGbVÏ!03!: qqf\T=.Z7L;r $HBɁڨA^a<\0(z\.n-ykJlxkjl?06^Z Qz 蒥C[FI&;`odHEHo{PdQcP>A9)eHCI/I'Fw!{C< d!R&Ydcq{I n:avgr^ -WF_BpZ]u{ wﺜT@J&C\hd 땞OY[}}Z#h]?FMtLt:kZ\ճYw-Zwjzx}ma=ϵ\OWxs=)\{M4lnHOF"lkN?m5Ȓb=n>Ox_-b/*{t\:,_`d8\ mtY5 -saEEJDD#CQ$" rpd -жcipylk&y6-_Nxp0-[?y}QZ5O~!wBMV "wn?o& jL6J{}uZn3RGj?s^cinj'*WX;L"$)PI)AZz ywWIwUWflW^ez^`2NZ*MF+ Hpb1&YT \Q*uYf[El4kx>zmϲh;|@oMNUws +fT4zkX-2A,a( e&haN%pp4E9"MN$n,qjk%N(^2 LdVPz"ٱ`=;Z}|uɹ\ a1qB8320~x K" (Jg`XӟojG S5 TH Iʵ3Qc76>~y?QrW4h}w9Cmu>"8Vn02Dfդ}y,7ϖC } i͇qKӏY?z w8ywtTa/em83OO_Ub)7H?#`bt1<ڮ?E;z͔m]dr϶[nn?T3gdz9ܻo+?*jݠkK{F)qaMq? 
6( uж8Y衯.4ВwLc~uޑ\ɑmN]ݜqS*vt%wd:ygK)ngRmmZqZ^~ qּ}-4;p2'sB(cpeHbSsc1ppP?b]^!Zοq+aQaq6EhXrͿ֧udQhZ:ԏloz'g'OyҺgV ,4ЇE^Cс%+L m8g[ONn(1 (___Z% L >^c/ړjhۖ0ҥ_/:dC'w+IoL+_T ?mͳMVd V̘եB,mPt108VYsR";6ZҩcQX}"b,b]W/(fa/8DR|05M' tղA7lp("jо?](݈Z wͧt1?2gL7rm<7<_g5j1wi1X'׮br3|RSѝL܅CSj|4wj2L%wxF0IكSa.\kb 'VnD꫹H:ʫ=)\s<e56M;Yu939\'8zW{̂5.6p&^ o dkz`a`z`i]gar-jN}Sy:.A村k~]{l Oo|>ՠ͗r4tSHkcA[~(%<䯃\NWv)  Z!Td0g(C0J-.5O]E\b7IP ZKȴ hL \ŴUM J{;4-HCGW`u,/+Ko4eË6\^_x%< >Y`ؖ^Iw7s~_P:cbüChZ dHTsI+T"T%DbTby%@5fcRZJӷE>m@gT9GIhhYNk}|P yyCfL͝l;n3#:3RXw[ 񞩤?N0-{#NuLd߆pQ.[jٱVZB؂j"l#)`ʹh()N$>9 # ^{0lm' sf  KFmh\[ц@.`].H]&pn9(}b_WU*u{K'/_N%#7Z+j^(eX_$/E"q}H\_$/Ez~cRI:XJ5gx ɒ5 DDZJ NUHӫBb!I >'Љ༕Lj2 xI3cQ<ʭ/*d DkHq@7ʚ18X\XK˭4qZhˇ:DT0ϣ>zZq C4,.Kv-jywcKfXi\TBL/I^5DGSFL WԄnWyΖFF4{Fn~FЍfSJUr.$%){5QA %-h`Ѝ_7ke=tB6ۉB: ve~ UQMآ2SxO"YZ25IiBA@Ɍ(RS)LLJK*%!DWqnp YCYio\d,a(3ryQ63)6n+%+H|pC룇^Emy-z |謻|ՂfUX"@ NiE,ELz22GZ|X_Ɓ2-+DTq)_&"maeo,ˏңA\u2j6?܊\kjJYS_.8#O?^Ed06ey5[_O~j.?x nʜW<:bm_qd0~ax,fHʊ#{mèԢۆ,i)ƭ|/TS|<:\-jp~`Q$m\ףΪ?xy8y/fy0.*6f;;uW ] [/~};?gi_OjdQk+YrULnj@_HiEK?nR%LNb '$c!wۏ~y?sw^HXTz }O~~Ш[ 3r̸)oǛy5tNCv|^e0ݭ_M^:x{Wsɥ%HQkC"*Tݬ=V|P"ymK:_ /nlX ~|Uq$PBq&R4 &J3N;Y_L}w?ٞfg2lOߊD_`zἣsnvcg}cƤwy%PX؏ΕYvxy5@P5)64࣋Jꢋz%Ge;X Wu*"Hf] d`}D/Дs53 F+r:dRxŮyԫl.M@}F:`M6e$$ KV:OOAǜQnCCsJJŔ2RlBIRIdrFP [xzT0)IRIESѹIB*S(@;$;Us+/!+C/ A@ ( "Ɣ4<<6͋Ax;{m ́e}MU`+᫩RUTVԢ8*J-*aeeA *뼫7?hWO'ؠ3bi|pAم&t U: @y, gϬ09m\$d+4Q%VKs [>PX#Z^NY9)k>9/|21[TSޓ yBtc#fg<"? tH ]. 7lՊ=g$f3aK<矙җ AS8eQH{WoKc~̅EWJD ֦r1Z_7O`0=v¼K $[ r6p|leU/ ^Jbi9w^yxoNu̚p뒧 iZ Y@^- jDjzur5:d[:g*L.3FS9'RJD|5i@kt`)dE8z_PLDQz&&t :#a`tS }Nމ73ޙ*  Xyo%C>}~U~:LVөZt*t*F~:ߡH:h4[Tg5޹;pp6kD6|O_rÛF`V8*Tm^W%iM ua(ZXJ%]S 5.6Q((Ir[@ "k빰N? )RyKG$B(w ;#R3lC;lwQY =4W~=+ h;~-GΨsflV[[݄ȀjJMorzPr Xh牑f2`OxK>B E QG*NtCzVyJ"-53q%={kieZSJBxBJ(6D-eeRRBJR !Ea^|sGp޻XY+R ?ˈ/>()SpHF>fHʺ$ ,;̌kXpL2uhg? =Dԓ@Qך0>L 7";K]Fl>7.ƍV-Rْ&A !&HDʱ֘Q, #>cɾ]5_uV謳rЙ|t3}5e[.nET{/\r%˥0x?O&G,~8!?KB1L烓">:Ds2xǓO'iJǹ,UkE<_^U˪*=i?Nl8jJzYiSo$1OF4' 6 ٶu4X!膶TM"UK/m6H:g-oȀy/Ncd[bV; F^/٣x6wavO x~v9sZ{7I6hxPv4@RPŚ7fyfݭp.46q8Zز@Vouqz.{lMܬgVr'n n% վ7]j,A=:=Z2DfBHAxkEĈ"!KaCIV1Ag|{>3]upH Gë'U'2(c7XCB&͌kHY' Qq!*Y7RTNY_Ly}֝ɵq*mr,q+D|o(ϩ!0YvyE-Z4|K( 5مW4|h)2JUP}ڊhduىQߝP%vj{N(2t!TZŊPșt6Zt6^@;~VR !00J .l(+-sa3r嶖TX}ƑLΖR)iR'JkY gR,U}R@F2m.#d" l|`]) 9tL 9Oe 7Q+PH 2+x=ar"FߖT``N5wur @=YXC־t"ƙ-PQhŒPB%NK" #CIi|3#=} y*x^SO7_LLva\n[[q7twy7̽aT2?4 '#^akO)uKGĻ.j{Zj5jls*httshY''y[쥼t̝׭Aw.*]r#7F?%:±^occ홷UE$&j" V9nYYUȃsD&:U)lmSݗ!~lݧ^X?>Jޔڕy3L-xrsهW?{7\Epi6r&W8t3?хE'cBT4m9VHE(2K:vDP'T䗗i|VA)oYl)0. tǣ@Z~ҪձedYۇ]G{?~eku։KO5Fobp C^8"\ĺf=cIp:N3/Ua-'vl_p1a!b)kC 콍ٸ`%&t6$A W뒲ϓQ4<*OV:F"BπdPܳq 0-#"I=2b[9Xw,n>TDsrO9&JsӮѮ8Q]rc`1GnF}QשJe H6F)#U58*m+l!Rx_s9uU['_l+tɫS8Zt\@1[ 4*5.ȀL1GW>|_L#jvQ3#koNœHp0o+o9-t0]1vN7^>wL ɏJ?L/z3ب된veboGvAg{ٝe$Y֦U)~N>k,ݏגwfsJh)+}Jft17]&,|,?,h<]~iņ9ׇ<_x7"xjŋZ\x0Oқ[ގ]^&Qۇ9X/h4oKS.{W/T?Ɗp6LMt hhR֣^rIv__ |⽠){+mD~U:N8aͅF(GYrAГ F阭ѥhrSDqeX͜ȸ Cbn;,|Beՠm 7N5: dß5h\?ܿLsg|,0y=y ]B1iǥАlNn1idz E(^lJdGZHZɌl90FjFl?qy.]mtP0':~# xtZύ9Xgm.g1g Κ ect (r d"71S$L@"Q r̬" $+]ey+{ıu'Huxp"KbP4z-n#IH.RLNIft-y|, H,*2&A|+ٗ(fI(FQ`9KVxuawbswFO'eigaivGYx{#6֐jF]^=A\8ޣݷŅuC/YLvtl]FvH-jXڿm|~jWZnpK˓>c~p5wY4JG\Dzp[*-M&zLKt*NH+Txu7c7/QȶچFi 0v,"dm5DYݠᷛĜ0q\bψ~L^Ef%8?X{4 }㛗4qz8.d&B[IH yoshuv[y{u#A3V4l8 i0-pXyv4F1&>hZFϤq}z.i@>UP5Q+"iـc΋evS5U52jJj V3RHtFHN)yc !hdkŤpR9O.E}Of sKք,%NM=DsR70x39#bWweDO9`}EI_Mp22Zk;$`. Ia~ٵ8\W?5qLmm E)xDUNuhs4gz&0!ېiRs4c eO*(#DB#UD+!tZQ-1JL/-8 0pֳMoGO%*v.V^R_{6WKzB|P}Ӈ7X:^l.lǦ7jܯQV'G,Ucx Xm4N& z$ ἠ4~{l^apE)0BRh] yrYˆb+2*2hRUQqZ'6ItŁѿpf_v.]qlzk"#frA>B;M h. 
1!$}E#R(ndGA2V5Bel$&U.E\"HlhRGn%t畏YJ%c&W>'v>3BC3YWz7!&> exY{;2-*;_2ѯxr1k6cdgY{Ys/$"# $Q8fDx)=|wG0g43H cd!zO?sdm!l$]|ptSy ]w_@<Z#w t "RG0 fz AhF 1:γ y}5_XN>WwYשݗ  2I \dJ|7 0O8*c֙ CV1*@,2H-QRZ'- 5&x-GB&*;U!k=-.KuȨBeme-S08kt8p޿?_S$q5NK??xxӻ~~]<‹ENjv]^ r 7i!rb٪!ZO4d~cؿ _+Ƭ|KPҔmPlx?h0AlS5(q.gـ2~ʿ'{KsZ+Km>|otrdp\{ [Wop;] nNG6i^aQ6`ګNu?Q{,d6L-컯{ւXtSwaE#jHnХYS'ZюN(t cD1D>"*8.9k<22 :K3 /dҾQ7G+T}V WoŵX[qe{͈oOϩ Q=VR9#H#"_-{˫tu'oa\0/v6 UjIs1rwJHřgGQ*qxBNKM&CF8i$8𜕨PxR\r}*M9'(2t*R9DތTm~1V/og5c-A [6*V@Aѥ-IĵQqǔUsm̉ag2*dTF(9Ņ`Qқs!'IbLBYM-PeV3gL K4=#DŒДMjQY_ђǻ/vw&ZҤrqu.iΜΨ̵9i^td ǝs;)wTL_2MNJVИJ-Kk8D6ٻ6r$W8`K@pef7Y,&wAH[,y%9wqKV%'8"Sbe IHQA0TAii } &8 iOc h/i҃>,p$B:9JϞQMIr!Bd9ו< &ƸDPqK, S_7DWCxM]κ]Mu]NyŸ?.7ب+>RK&MݭcwBGw7Hbgl6$Lh pbvsf%Гr9S&gJΆr!:<8}`p2B0elwmf>KIQܙ(R "$MəAw[>"(mY(ع7v&lR^yG<1JA0YKl\e5@8@-Bv[yDȳR,br)F](1MY'G Ybu "dSgiDy8i\#ИLHZ 1댜A¼lXhѝTfFlL"0i"WLhl4:$HUrѫpzà!!I %%F_bB))&9,3"x%QDNL PVؓg *yelfO*`Bf\g'Tn-W}ρF6 LC ePG B&S&N2@0Qw]/c)c4?D k?u agPܓ;>i܁mԓ'sC Wҙ`yS! kؠ|Uͫo4y}2-EɻkOF;'lW$rSϸ姧Q+n] Zܸӌ䢡L3I5iC9k6;,=ox^=:胦v9}P6bHэ! mun;?=_qKϖxm q3o9ɫ/2NxZ]6;h:W4@TW(U&WQ2G룊xIdiP*[oW/z7M:yAG(I kN]g - r2(џZ(kd* f+\2!FK) &&3O+  2۝'vMq FbT'nC.#jiu B6B6VBz XUGg Ed$y'-cZ9Ryhڨbd;V=),ќXA0UHq*pѲ(Q$ HTT)Rg㥗 .w~.ѧD*;Lx d*U;1Lk=JHuNu>&nJ\'C [A-l;9Ok39ž;/tDKQ5ve_LSi=G߽zwhy^]5uH`;֪ɼ[]6?TaDžG0mP@EQlDʛl=҆}@*4]wʠj)e;];.|(L #-raUQVEYٮ]@Wp8yaf#=!-k4#nץ.SЏRmфc}tqn" CY# =MÎ"ZEmEK5@ I#iKpýوf룓QKbUEjDHFa@eHYr0,8 Bg.DV&nȬ+;#gI𱾜\Ru}4}M{Ȅ Isȿ< g=cɶs] Ҧcԁ0c%hFZd-J7Abfqk/QW{nÖE:na<8}Ro5z$hiE!ѧWMȺh-JuvKov5e.xy0ѧV_BM0ʘuc k$|<ؾzΪ]}w!ꞑۣ]2벿#Yםڹkء+奇m#v|1Z]1:'&Zbv"9YxsH@FEo >~Xpv>pڽ1QH 9sM=0[&! !jŮ3==MR>ƈbW_9saM3?_^tx^ru ک,ꔅ /;rx4zkE\^bYI]Jˀч8@"3Dg29YI_t tg7RbOXOIUQBY҇64GJ&cBgeF $`ƉRSkbRJ]&/" ɍRIk0l8j3rtZzBFwa0MbCXl@v@l[At4\7wW)!zq'5}keGz*x`_?)NT,SjQ 1:2y@I%0V캎d&`1K.HhU }B3xPs-= |&%յf쌜 *E){хqƶPw uNuᒢ2UZ:7D=Hkz=t~8\cGrg%Y,.`{x@z,`2Ka [1TcK+32ϣ`2Aڔ}A(tYN2v;#q#e\;m }gZ{^k pG##w.[ԥxg'u! 3:Na&` r +:Db&0P$lIG$Y!dTTag<~^hĮc[:ֈ׈F\)'w$(K:eA4rd9mK]` 8pVHu3 mH6\ "F&##E]'f팜 %zՁn|qɖzvNbF$VRBrn"Ԏ΄r ^Pq壍.d -^܅^<˹#VvՖw–Ad5EG> |{چlAT\mZCN&OZKNoQMdh{1:pR#mEKnC$]%Պt٧DJl8ΒbgJ; &39x@T'M#]]wo\KtSSznGDsxvߔ*5n_Z뻧Mtf1tH-kjmƏwڠ[-d<^bg7sބ7 #mt_~El`<לgCO>hoG4?y18cF2˂"Fd +k@82 V&ÙA y8*+.'O-_Ol^{q Flf<;pbp4yD&D!2xML=W:;=9#IbK83/;A+hGHXxeZBB4 au+2ɬ|8yAӔD S 䴳FFC4R9XV[\.it\+R]VL5V\NMZYpuW$kvbkޯ(xi gEM˓wmB8;:qn{$S5 t!ڢUлF9Y0@o%|e~w_&d.;ۤۨg49%OZ! pz Fԙt bÒ90F#R+I=z\=b7׍0s'nK2 6dkJ*R(5uGb"Y9}$`Z>J.E(TP}BQ> G(T](TP}BQ> G(V BQ> G"BQ>],vP}BQ> Gx' 1Ϊd~5gU*(-ꃖ?䴜UϪP!ifLT 3Qa&*D0fLT 3Qa&zP" 3Qa&*D0fLT 3zm 3Q٦)D0fLT 3Qa&*D0rk( 6Vw\~eLMZk׎3[KJ.;Xv?rztϺwU)y)pgRH ୲=7$HX˃s۲TA҇*&h`P3 gRXQ1xΒj=zu& m8W5x ^bM^Wnqͫ/ 7>7P D#!R t4ըA2˭YEEy;QԩﵽiV[垰G19yn却mo^ƿar*̿YlϦyLj;5 &(@H|[`u& :\`eyI*%"\hr$! ;{$N ­6̅sa}1$I@egSG JsN_r50Lu+K !ҝ'8368rpGɫ3&| h%{"X AƖ0>a85d4Ȅg.MC^ktk*AfV2(P.w'5>BEoeLD҅$΄q! XGkXA@%HeTvI˥<7ޚʳNKf3dഞEn1_bp:p%?u~6L|zp}w=6XfL^qL"~U7#ԅ tx-}͸$I4kQҁ0괐̤ȁ0"`^q@;^-6TR0JAX?qo{N+|U:OOݾ^fZhiM}>q摩}dP&0ƧqWH!9t+l쇝 62vʿ&49 ADz t2(oMrB Q\*eψB]V>'i۲"׳zwTOgEj Ok^F4[ж-͔}bF4iɲk:RymbQBt(m#E]f\zw/<-OMU7|`*`L)C@V1.H)x$ǖxH̩M zc7et`x<`؟Dd)pJLJgL 6<B@)'ؘ١R[ǸK0Qvߺ1zyt\Oo-ӃC]͍P18Tܞ3|Y۟[z狯9J͓ 4:~m(̣٩#U >U{D?<" n5|ny<ȡh|86*#8n$ʺ ' L!p-L3nAzQh 3f/2'C_fs-M(4Yei6Fw5V*"~TsC H`Z璎~ot}Pk9'|}c>U+}jfڄ~]\|z~~9*ũ?Lrsn#p4<{~;w`ִY[:] [ 'rm3˓6ᨱO[̫h8Uq k[eV\꺾jY:Q8בܰh'*ݧhJ C#_8p?_ْYD<0Z*̿RN^βʷhu(!Ju?]|/h$>~ǣ?aGǯq 4z\׉0xy'&%S!F ʼ`rtv Ogsb3<]![&A=fgg3vk3vu:gJˤnz0K>y5pˬPP;Fx͍ӎQČy`yL yZJa%gm ! 
$'޸|Y+ʰmA.4R N:}$OWIolVC}CZA=NG4H6F;}s?蛻WgMM`_zPpDГ9•#WTxIڋh.F<RnB]k6ŘAO!EnH40/1Ig4`-U[PDHP?{֍ k_6Gݸ*Y'3>Lu4`k%Q(v~6x%"}T#ĵ___־|R$/= }T*c p?sr5i BsHVGNE!;\ S."\N{u=]YwH݁}[^Πw @[p.e:oБO0_۫ڃ7\Xu%mZk.YR!Q4$_GRl7#FA˄& Y~/) Ri#xu>PB*SlgzˁvbS^޺ Y{CVC=̤ٽ{`lC :\ nGmi= @R`GESWM1NFDtGpZw%ʘ>_OϮv8ϭכSc_?ºs K8ȷh^%In]2}{5yWF>BOe.oICxt%ϵ.ԬezN>{ igkd|a!׫}]w0F>p`^'/Hps3kV\WK7ԝGv r}`H=dх:H`vh pԗn^.\Pֽ(m7[+vzz@a; !-j!&t/uKhU*gˀWkŚ|89 EY?;:w6() Z-C\B*գflK՗4Lb 8R:tPϬtM)p1೅r1UIO5OàI%4b}+ Vm5MlV>ƪByu)NW=>M5;\$ڊdRC%A ++ . kf6г%تcD zm JC=#޺pL/$+X{2[Tv+omVy>iBxs ?I=ē=fW^1^^ml'd#:96RmA$2xW`QQ ySZIyR5l?/|9M>ުsmdbq&VZESR5j!:t]Ŭ c#@ꟛ~kqU"q!ۿh :ўGYb7E4c@H"!ؖadZH9e6+Nୠ75.iwR`ADŽg ٳ\\.i9VU#rH,ɹ8Lqp `/Ѝ!zp5b>i|۫˿VF;YYZ;p(>\~jrѕ r1YOX4pԪ]fFѻ8fYɓ2qSU'vռښQz|3(|=Ug|&h/@s(ӤG uY N&,~:\<;(D]UsKJ 6-˟dގ9a˛u_kz98\xe>LS'"N}*ѵGxRAhoQ򃞒&Ќ9pF/vY~{/;9>kfнZN]T_j}14]ox\O߾-]q_M/2EJֵd65c66nlfu,Be|>O2m)~>>ugWw 4nlը[] rSƊ"EayBFZ$痹}IQS@gn_%o RMǓY*"wenk]2M5GF2GgjǓQŴW+cVv꯳!go;|ëwo_~|?&޼Wo~;9"rUrG O?|^մh<>ivCC_o+xOU7[b#4R7e"P*̮wGlvck''ǂeMk@]"U(p3)5q WkH-lxvgBtSc{d瘔cZ.uX[H!ɒ59{懘\ؗ4ľ?c#;_F7aՐB1sfYT &7yLh5%E!MN̢S>BQ: 84[N!͘(iSN޺pB#k>ͥ]:ypjV<3}em&x3Vf5ٶ[x8n9R ,n Xõv_[#++DYb s#<8l+j\3elPzT}3v< 8%t/Ӣ/ <6GJR9_sRo歾~1ĜA/RXU\m˜PNlq.Gg^ׅjrFK[ᝣ}2hpi>Ѱ!~ {YofGWmLx*R.;<ӻ>x   VfCQ.d:U$L0X5U[}++b)08ILR-Lޠ`R^圝U)! Hb>ªS](AFetjRrqV]Fu~`t[ }7΁wٙ`E >?~iG^}v};gӫȾvn8ZtJ1H(˫oFWGY6,pT0p| O+'3*9xڮ>ڃ:y&tb:MW!ΞWO7w8zq^deuU)l,V2Tu J2 K2ycPb0nh}†FHvt-q敽l`͆Nm}Mas| zGI=N!Yi*hǜ?.W3K3*&a5De۱Lq,3(fu N' F1*,2M͒;O fнiz4gDdQ;]]^?1>WqI޴ 퍖O8-*8jw*3ߌ&'ɲH<5 p7H]a kׂr6I+k +>B,WjAOQwEc0%JLh+I+|UފnREHFfkmGŏ=X^ /A/yIڊe+y[-_t5eu=nf!dF& M>'۟cO7J..N'n>4LAa~dPpmL2%3h”?0 ApΫ/N&Z£+,B e 66(HMHeK/پ.[{I uKoV|IW}pӃhϥ~^ N!s+̰ԉ<WƢRrIfk2h2ꂰxUF%BWtV)K>&e4V* )12 V^fΞR(3BC3ԥ5#5OV1$U 'w7R3rl>Vfvzi_$J FVHFCwO`BZ@1K9Ne,/ 0 4H٤QIl`֖Gn-P:΂,%f+GJ&>!bB̆s)}ys~{kZlfy2eEtY~- ce!ǀB,/ҜR/MLoQ#p(. 
j3m 5,#+K5@8.B3taU+\`j*q F&G $huZ#X@@cj>f09dv(}I ȪL9d/.nzaem ]Rp?]UOKy?vUe4K%U/lz{q5ۼ|r5]/n.,^˼_rInl~~-R %2bxcZ9΅vO7`]jfٺ!a2-Vɘ@9qdc #p]K*fmY{q]cP#tyfc“cgEWt/oJi8)r^no%գ߰ ~RKK 3/oѡx4m\I6~~b: бn뎸Ţ%k^jToM;|twwˆ'6)LFMVu6R]b\y&+Tqޞ<qΤQX qMb})[JCoX VXD-N.7]hced䦱Cu=91ɵ,pJq9-z-,XŎjxQDi=w`LRgs4*fdL05K)&-mukA1Aj$jK A|NzpJ ;t b}4cQUfDvhA5n/%jM ˫KE!/k=BJ&C'qNZe Ϩ"A3tbhȃ5aR28xhyaő*;H5su 7E[΢D p `2%Fm끋 ZR\lt2r g}H<1ŝ*V9&Edmpkù#djRcE0bv0g0B '48F+L_4hJu]XA 5hQT+.Í[77wW^ ,7LEhᡱ:&e!İr,.tIdtHdt"E:"RI+)pL(5 9RbȘ4"eu6u;L"ʗz6>J%.2њeA5s\: hIJM=y ]1ǥ0*+U`sb !՘$ʬ1`{AK)h6V2Y(Rfui#v5sv#N jWG\$IKqE'TL< ?G's\ECF90f%H*f.x\CȐ+bA&r39HPHE :@UjÖԯr 0vkcQWFD9  F| JcEB% >ptbG)!B#K4bJpU6U"g "nf0\FD.8d"$F& N(#*j+V3gD.W%Ym%)pq܈|%z% jd*CH({H񂶌k0Hp!=Dp\itD} )+鶝#wbuBײHϓmT'~&P!ͱ$9ƅ"-i#M44(t玗9eK4G/e7{ڃIPd5싲٥D*J2je9KVxpHsء|N?7tm,rr޿z1[MOMj P5wBۻ.AC.1޻ݷuC/Ewtf;vH-jZۿmMύww4/|&_s5Emg pvͧZѕ_-f^U*QI&qmB_MoV GԹ>h)HDr'mT1O̵2xFÅ`q_\fs'hxBko0\o^\Jc[WcB3zR+a\&O]Ԇ.tvK2{*x0l(AL!;6i>ቝjFtb&ZH D˼&V,@2.9QhgYd83Q`/Xq HJainoY۽pzbb~_/%E8>/A L3F<5d2 I`fAsKV`,`H`:8t6} 0'ǍB28Tdf,Y_`˵ z_&M9b`3噘J+{Zռ/jZûZD1~C5V4Z2\uઘ /pU΋G+•5Ȅ\ \spUD9՗+F .vas8\8siì=Wtuc9}%cGpU 7pU̕b]bZ:+ #* f.U;WJ+p%р=+2p* *֞:8\+p JY GpEKbUpUa;•tӫ_K[?#5er(7mꏋ噵9f;͗yd!Z]mo[7+Bcsȇnmh60%$'^ã˶d-7 ֡!ÙpF_Du˛o{d4j̾,z@Gi5Fϛ|R#0+|9Fp%׸ӕZRL%aڐR ?cWpUɵ/*wJfW_!\Y"ėW`r઒^ \1Z|GFWƦ|If`r|v\b*DWLhWOBhm^#G W3p܏Jcft=\mA8l{1pUERRkw*k+%A[v5qѸ%?l ~U%~ ;K~K61L*$8= )6H5kE[1οWh!ps;>Yjd}=Vn~~o+WUo)Wƀ} uIHu1JV( I'ҥhBJ62+7-JPmwۻ/pxX)y]|s\Wimn߾e ߟ~~KaKQWS^P}![@NI{CTQy tBH$});c,CǖQޜ7vr+ai`߶)̵ͷsF^]:۫PÏoF~|R>~G)G:Ͱqo >5n[Yϛ7F.n e۳_8oA`kEGQ4h!6EJ㼤`>QYQ]K$ ފfr !h فsr5f^*[ee.!ba_ϱzk {r{;_1s4ym4TLj:|DhJ\D 6VT$ETP3 KR `X ŬSS^F l8`NZbǂ9w jPhx1ɣEI kk$|r\XUNFٙ"Ϛ:;^SNrv7Ȕ^|69pƥ%GY] =ѡ('<0N:J)2gKJZ#8ГNT(sr90hAkd쌜;]3,3B-n]aaRߓ&ip/8z ?OǓ?8bG!+ *E9˺g GF/Bփ F%F*Ygw"r /C5|B y\Me `0YI)$%Lh]Lj9w#vҒ+wl(EY{k8"k'cʐ2ΫXKe`8²+*&N]cp-d ي*ɔY(e.V̢lTTv]3r֨~ʣiwmQwj{Ds'/2!BRbRt"RfՈ،#(yDNa,m5*b,dQ3!$lbdhQ#bg܍O28nfµKθdK\Բc\{\˸S.06DɦiIXLaM6 h]pDcXqǶxرq;<|[Xkem`3p_Jڇ9~aRZV+Ha[}c1z[,F,Y3 P jE̪F 8" >̱0?@ O}Vul޼tK"o%m<Rלu0_,U[} n_[7ַw9FF4ܾmܲs޶7zl|w糫wv~ 7~(0x:pLo?ڬaqkΧ?oz{痦nMsS9}/'e6&9@ӹm>)]a?~ՏfX"jj>C&.k lKQPtH]2& ~c;Xd곔J&Š!fpߵ~{zAoۃ^y[^׽uq0o'th JP,xg"6$}Tf:#1cȝ8IZi\d,h`8cK&[vVHDQ9t*[61dP%cAiQE ){7-)$#= pI(EP41(؜١o@f;ӌn'.\BW=15br2m}R'fFOx[CZ@tlBC)*QdRXYԠ%8S t .x2 _SٸL:z穄 (]RG< Uȹ[WY#zRS\>{?$`M1=5XoW-g,/(y%X\bJ1TjIzns6 sK#G\]E^r6+BSb ~q96?VYN!㱻[)J(U9:J\tyZO}Έ I9*tNH3AHTI͚*LJ;es23![TJlR,k!YHuFΆ~y+Z*%-W~;_\(7Y!n 𢡊6V^y-Wz,ijdL>[E^t+M.PmF[0 =+JY⭘_uqym۞EZZUoɰ #ԑe\`Ȟ!XfHUwcP̸m1V["CŠ-e`,LҡtIDY aV$TQ@,{7T[&1D@FAAr7*60\XٔX RM%)mQ|l#jWcYVF%0V]-I7Nup[0g\J8՚:>1s!OzM,5 _]GQ~ŷ]s%nU/aOz'~M~̽۹,;A(jzⴟ%}-Ƀ^zW:o,d!kxBxxv̞ @Q|)ݵº\@/n=|8i9'~? }ʩy>ÏKܷ Ҷh_oZ:::.(Bҹz|7jC15)dir/x8FǗjԴ<}߻2Y޴y= _[?O~l|w19}fY1A s/~:('ӵUtKΑ`8bFja[,n?^c]yHҊ#^=ٺaa0kYfw,)-> Up2_嘧g[&׎*'nusE"Z:gy.#u`ᨭv1νaU8 +;u7[~Atk^/,0U=-N6Txzipt6{qEKտSB-S5̩Nx:>?1H}o;y7}sxI5@{ J=>n Û7eWCxM6]zK]Ny͸>.g'G9Oa%O"ҏsfif@51*lCjQ\.ypy y@CZE4څbI4knBv 5:fjVmz/3>eRNHJF$$2.V"7dY6jN3%gKe 0b"".x&,3UVst+u$Z8m KO-,aoϡ^ 4Mo*n_nroOeର+~7oH?%홖f&% V0Z [ZcI"maLwVMg֪* ɝK40! Uw *sN #jtߔsL('<'J#"2qGkEb vMs?1Bmnv[ۥ/-n8Cx~ ht{&QP0CZ?T\R)Ɏ?9n ]B]q ^b8bYGuT w`U VC2]DqE"UlI!㊐  Ffa B+p?-S[31+^@S䣣)  9.I\KVtIRK,B DgGr;jKOlc`RaJ* 4uM/6+YlL|J?\̇5WX?a4go~#.>:a00 _/?^Q\Ϥw(qt0ʖTA:Z3hLj }3y^7g &WzߠQSz4zY?f0in߇4vf3PDŽ_8 ֒لUg*!T>j`H=f4N{X RaqP7|0+z͹D|Ft]#|t%4% 䫾m!,f>b\@ii7dUC汕\>V1*?..x ZjϺݿrSf1RwTV_B=6I#! 
!J'aӯY8ͿwԴ)ejAzrn9,W3To~;4NyjtIby:mY$@$kiuz_E2h[|Mk5QTv`n)p+;yOΈ9ŻC]/At'7Ԍ= q!RZ ҈ X9` y`g"i {$Q"eΦ9f5g;J"}`0 1R䅌DYi;: e,6zMxxxZNl>~$e;Φ+'hTB[`12cga(@.t9JP$:3@I8%F*]2C eP!v5_^Pb,?l B*ŗ.}eҪ7UsrAb:z ʑ+`6~5{P ]%|X`Q2M%`;&1 ]O_IP˓%O-"w<(MN|Sz-6B<d>*AJoD&r#jDLpiј Q0!S&a*1^Q0kk ;R5,3&ΖٌдE%O$}WROܺ |ݯO^E!H wßX`9:0+p6*%!^HE,3e@ [CNX B`1²Uhªq`$yfuԅglL.w XD"]P ( gr)$1Zx*V HG U?f]cٰ6-O_PڡKE>Qsf]c29m_] ,6~ohX/lRPb.eɈҥF0-_`*_gBX]J #cW *pr+YY m mtnrlNΊL׊͡^+k)L@yM>jb w,t8vS*4G~ _iTqR%0uQI&ɪ]68jUϊ8Wݛ?*2WW5~QQVE-$ ]+i׷7,x_ _ w Ϯ_`aQ_)IW}Mc{buiD-1|f6 5 ?+sy]ŧ{xU2uX|ͯʨ@l/'Eǫo;O='|Dp "@\ @Z[W ƤW q~Kp*_j[4\V6W]b{0|@i)>b2kFsh>KL` {H|%Lή?~3Qo>J݋z;re/8|_,201hv:&owvܾ_?z>k)%V®AY*P\<,Kc4Kg3}fkǩVOMEA*I)EǸ.O8}% &qI2;,% LqˀY,CjtNcKVYe.V jM0j*>$GsUX@ZDۏK 9Z']N+Xqt4pX*II wpJєJQ>q ;i)o=\\^ \ifGW 0!c &qIZWIJ:zpEzm3k0#p`Ab*.p`i>\=XJ&vm;eqRk?i'UdX4%<'{F] Z6AdA=DŽ%R%۳")ْ,k]utZ[iP>^f/O̳~KM%O)8?T4:5Dr.w ;8ܫo8YA6#`t45XGsfDi26DlI{LpM8BN \!Z=\!^\띩x},gGɧY?c ca953OGZj&ۥzg ^JqrlZ\[ZECOm"&}[O$NjgW]=^(=|`ose=nuUlя[ӰڕMtl&#Rf-ð] 1 3#@QA]HvQ?0eg5o] |L(Ӽۭ}]ͿTE 5*稵2Ui.Ȣ)"4룴YFMx f&$H#?&D{Y-__>EN1i`ټ7RE&'RFl22 IbKyO:/b"%$'12fE,Me\L\"9QlO=)NFi$x~RBycFj"C>yj-%JdITiԭR :KeF:M_yq~~=ʥ֌%)m]3BJᨨf*BF'0)SS%*D 5HX#r5(fLcSJd38f3NYVIʔE=3}CdD2S e2pʲ8$I !Sn9{Ft ;J0U@)?ާ\1Ѩ*ciͻCs9[_ _[O .Xd!}C =zuqs};ThiV:uH lƟ+s\ֆ`WUNȪs#s( f[4hZ)*!wBxhQFA89H01.!X5E_G8:BӏkL_=ҲCHK\0c;ƾE=I+}1뜵'Q\2c I&l.ghliWBb&E `0J!B Q}2d{R]J };a*I%CLxePl K-OnEE F'+9+PCۆuqx(FM&Jm`yUc-}(q0h y27q,jBP(e :2HqZxj+pYW0THq80Miܜ膥D@ XȀ74tkjY (r  P ލl[рE=f"]2U+V#oUِ6C&Ծ7 XIa``R{He&d!fP57._s*.P!S `5a( VH(@ Lh44QD&+eB>7[SR :,j8bG-0 jGPAA. DU9%;3X@p?e ` I&- p]BP LF0J8M=n0XQGxd)$h~VJ@!O)A:J]لVeFVGe] %ٗ5-iT^>яT%PȾGhh 2RDāj`#hV6 H۝@VAzP7_&$M&2Й:H|ٯ_DńE~ŬQ@s|]1F@TPȝڡ&_0{_`}9-mv0vNEܲmɨzT34%MJckP\q@ɁhOKW40<@xfgpA|ѨBB'䅵yPK4!2 *|p|RT+w0,9Ѹ8ЯTEK*z`:G͠ ۀ8 ΍dA*X?<{UrugD%H 5Yr&貚 I";> m:_ԝGUD|ҷ{%P}+xm m,"{إt13|R;ЋQuD!p9X.h 7LE`jI7Ӡk@)@2{Ez(%G&b%ssǂ@AK^/B>J :iMD0j!ik 4^ !:2%YصF!uF I,ds P  7Q% CQiI6cB:*gDh!c+9CєV.ǟ?zO_!ud1s(T#i% Pf9v-} 4RIoQ{)@E8 7ܭE @T ~ KPA`2[6Ѧ`A[+۸dr6OWiżn1veM_h qfQ:Z 4Q\Cژ9Ev߯ ENm6ZR!ΣDjсaK6fPN"Fjx6L|QiF5h7%<%:` rhSbHz(Gȍ ޗ"Q].jP:SAAJ@R9$tiS)7 ! @6m6@C+ MJ*UJ\(\BPޠz׻,/V2 -)~B.24F@R%HԜEa,Tu`\GҩH*<QNڙZgTn4H5j+U|tu2 BhaR5gm} pStrxd^򭴾DJ'oj+q'Qk +@(\JVya2ia3_ vrPC%#qU{`Qs6VF˥>Q`eSFwLf5d_0CP$ 0tAb鱛jAH!M*?"u-+yG[DHz+@<igqbq̲%iNV3MP[+HEm"۽n8)bS5;EgSeMDfճ}?aНao~aV@~0_P" IႺ:eQ! @JY@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; N@Z fO DKHk+xOZac'; N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v:,& dx@׸q!ZTzN / N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@zN `Ûl<''vG'5#:Ch)0F ='My}GNpԨdTԨݷ~V+ډuw oۋU&/U?]Rfby5˓?L1ea>*qwzVvdɅ&Z44Ҧ[Ɋ<#,*YܳǛZ9o㥲{~!A׾Sm6-QN^݋>__ɮYYT'3k VN>8jy=6y_st~=7oO}q/miu^庬A"Vo|ͩ_:'7\]O›x~#' (ɲfjڹƜoyFuOaҧnsuy}3.r9XfY~{͛CBv~^Z>GϝhqjO?Gן^i->Χ{szqu(g-Q+P\FM*pu%' R7>$&\כE[-ad/S 9bil-ʗ K&`DmN:n"dK)tBڒ־;CyCYۼIτ$9;2*-~ٺbI_M%KPioчU_|^| 5;""ItgGz2FH= bEUU;jn3BZc,3bEe]͆=bVO+^{~;_vu ZpSQ7(cqAֿʾr{n]W;7RgܹPxOd>d{5J>NvNr ooo 4b>5J]ru*sSR>L =?X>`_f7$ݓ̘"%?UáD]4ǀeYjVTU_wuU|u¤<00Unujp|_nJt}S= =J8m>yy#9;,-P☈)c3(0>{-=ܐ{(-3+mq]$fZ."QK2Kj)` s pr8xWq>&;ۇ2\7n@φNNk?MN~aR:@ׂTSJ{bt2dRwP r|=wXNJ._SR7``j~SH̪c֯Q A==w'G'סs^'qsJI5J0#jPcJLYME:%bڔDY&1Z)ϑA<`XtN1q<{t4vHc/~ɑ\Z2-xL B84Қ I,]PL=. 
|H=źjO{Ŵ@yS_ԃ_vn6n*:&P#+Ye@Bm4%T`./x \H;q<\mT}7k8q]&HGJڍ:rb1V11h*]mr*@]qs:]*]ڟ0ǃd)Y}g"(11@^*=V6 ßd a{؟嗅M]evG*0[EgcjTs@h{GS1oȈI+h݌[Wۻ;:mdL~l6B Zv>۫Z{r>,f-nOs7}t~{v6]#n=FY: ,CuKPU({nJ\rӀgKڭH/DB"~TE$1U"W"[ym_u2i:t)$qI -IJ"B8K6sRd*`h_bHXdŏRte^^.+B)|֯wϫ{2t9{{%g21 R[Z[g~ޒ Oh(0QATN6x:`#ӰfX44QP%&- 39X6*i4S)Q4G+(ZZG5 }6_[!l^۽q ߱ rW6{?&fL/x| `lICpTl%u@,Hʽ\(dZpTfo*YBnu$+3f&0Z3& ,}8-I\&wp}v5l;$xSN y,_Yj"fG;DM%-*1%NjD fJ8+NN jC&SSY22(1I > cIes)\CW&cߑm:ZN78n`%^C]uYޟlz*GEF AqkW&yk'ygKc>*ãɜIjP[5^17F@ZfΆlHm#᎒1sn('ĀQu TNDVfZ'LI<Հ[Dul1qv;GЂG;gA^׀Ç=r.; =a jz1u>PqǓsЄXc\Ys,@= #ML&i^E=82gX~}_gkI 6<!0m׊f$hbV7XCv ,PTٍnIC^*рFm&7ƍRx $YɍHP60TheΒ`m<RFIc֙2C1JG`3w>$FfkJ1*2WJ,T PŽ9 S_zǴc)ORXΚN|qaFᄏ; ߏF|{xʿ ˻y jb&?^EK"}qʳ-8Ԑ$W.1afo00+lCMATGrMᦠ0B[a6~AI!ʧ&}ёa246Hvb2(r{YYf?4l+; \|VD ֟[IƖlxيaCDӽ&ru5o DPBUXI,Oqߏ~.Ǔఏ^tőhw~ކ،/OnZqCc#ae͛O9?yI5&<4=~R}{) ?_-W~\ŃwL? M1Qhݽ.U›4JN J:ԛ7i$~ִӲM^Ww꽸}&Տ~<[s5o?xJQWq|J|)iA3cLmUٲ;ܼTEޏo֨\ƣ6Yh6MoZ?nnŮ71ew5YG򝔚˺CQ_3不\ _ikdox$4 t򔌍\?kcuA\߸}͚u7 $6z9S5[>Uda|^`4j:^G9]M օ -ykؚjYԺ~QU0jdA=WjҖ]ىٳ(պgN=bĦ;skg}Һ@Ep,DSiEt@ QD3 &zVFIV!lݳ`SM E\  K]~{e<2rL)oZxcy v2ƻu8ϸ29qz!_TuZ^Ϳ.v;mv^Λ*T .콯:*&XytaȯJU%|كC*_pĊEeg+2:*"Qǔ_i&;m}&"XF9&) 3Z QvmOC</pe!͎H F:i>Rl&Y)'ޙ#i2Xl#x 0^{؞ƠݍkLi\UcbFRKRRI3A+_&`D 2=x`8gwor69r;H<~}v0Vs׉cg%?>a!>Vuspa U/ JwY2g\M"&Ԭ(4:R0"W*>I2OƲKd>&VZ֯/MԖ˄5˙EIҚčQqϔSscX7IhIYF"߫tBQE oڒ-{:&y&:aVlhIӌMCeЕ Rr*Rڤ7!+78{%킡_^5Jyz?&Z-w~ळMvl3&9μlU 7BoL- dKVm)+:Pӛ+*`so^.J cl72:u5@HZ*2u<YX H-cM =Ecs vVH:cGHұ Sp@8: Q CMM $yU&i_WDh1.#(8&Sbq0վж~6 doj. 5>du"d4-,(Y-xZ5SV65]ggg9JNtn-ut+&R.3{'?܏Łnƒ35;.99C ތp>Ti~j4?N71 4z_;2U:O?gt{%T=;X}bNfD`Rn$mcc;7{G:w_wyt[h_k)4z/Uqt6^r_4M8i78O#];"iӣɵl]Tyվbp1cq?7'tv92:2{枦gh0l@=|uk/#^{ՇuC*ӖOx.,>=?W<9;J$WW]4rU׵pUȋc.izٞyJi3"ly jVJ:y|yr("%3x"0j_NJ\'ZU3ƿTO&1:z{N. B/^|/xsrNUuYo ;}/ b_-.--lriVo&ult8k޶7!t̘%GKFxlog^{D5xܽb5 MDblla%ekj)4 Ά\v!Dc 8.Bbjb[ii5aw&;KP\s:dgbˠ}ų-v{B者鐘WցIբЁ joH7*rkG;ʭƓld㼖uCգE 54Zl :$\)pt cc26{#> ܁Grr0̵{u.:eP uBԻf@iJM張SUj JFr1x߹qʂC[9.N[ hJY1\ Qh5jQFh\\;{i-t 6Z\bFɀڹ2ѩ\{ M )#iL\wɢKI胏~օއӮW8:e25x|ߚH֑`ۥu4)nټD&RMpIvG;?0+!_xٓ'}**_iVPŌA7i7N;:n T }kѧk2$*k Y) qww,gCO&3sc _"h_%ە#=|f7yy[~XVhٰ(ld4~Y4dQZ;h\\_; ^h=fvXw FNe>ӛeU :vzkyܝ9郯V/gȎFDsLZ.yYRJs& -kmrPk9q赜8Zn"I2hjSpvMl,0mcCѹr/ENk&{L~.J3"η.'ܿ١`Wßp-p6\ۆ"| Y^;1+#Gqp:l0x2x͜/;fqI5 폹cq<`c1(X(V;JqǐZrwǐ4;pw ߰$<2V{Mjp;ڶppA1"\+R+e"`\WhW,ɺ"\\+Ju&w\=ઇh[SP0,IRpEj_ a1K\4>W(XIS H.+RTa1G\)鴕 U+ו+R H^ JjTn$QG&+m2D%`D gg/~]Ngx7Osn+GLNw89UnC[\jeʟzi2u%2bip<}~vs2>}q,zDy>'@mӂxߝOQ9$1;5FC54-T͆neU Pz?w svBEosRsla}DjFWN.._ ?;6È/=Q TqeLº1Uy٥;5[χܢ;;2eٳ`\*)e]wr>6&`vY.г-2~Xy84ZծjuC2ȂcMAdˋݸ+mQۖF@EH+X t%PWk(f*sw-JB] !9+VH,Jr+R\[07+ W(XrHPduG\9 +i1+:2suJlb(q(nͱ}W#GuRn*% WĀm+*W$Vɵ\qecWR #*]; G]uL)"N+T\WR 瀞`ŋբ\Zrt+AI"+Y ,\9&7?R YR#]JOYHWvkMI&,$WcUviRm3`?F  -X)"&E0"2F +]uEr @Zdo]J5એ4 \`'ɕH޺"fX W΂d \8+.w\J-p%7z9fV;{]U7fԺ#-;śzU% ;VH.J;H%W=ĕp2(I\\(ƺ"F+R؀J:Ù-W$Xc]\'Jg[WR UqS$ k^uEr+RM"V #@WPvQ_0 mu] NB~ZkK[Ica1|%8?,M!x1u:=rPSS'ە@}H+frH\ }t-5pE)W$\Qġ`TJpC\YXQ7(3V H./fԊ샎I% A}&U e9$W+RC+ذaprO!I8z:nrg$GҊppzN)W(+{3j;Haz+ {Z'Z++HRTj=ઇBpQbkV Hc fr\IB0?` \C+\qW H U:pz+% c7=vnjmv%,[)8GW6'%GMHr fA[K&YIV% 6L\K4r4-lA0mEkU H-ƪDqE*`UWV*r+ -R+ّJ+c_p%+Ǻ"+RiWjîWc)?C7{Zsn*mf;ԀmR+ L++\Z%r-Ap%.ɺBJc]r])"uE*pC\I9+ɺ"U:;P0ઇµ% \bpEr]1AZr;Hz+Vfch5|¹UN.VT~Z:*VL. 
l%" -Wv3/6, жkP 3s(RfN f(RdNP2<l mQ7(Lr-ŵa<}fR[pׂ2LMBqe:=hf={/NZ^=U[هxsY+|9lV]>T(*\L+znl[Bv, Vl8\mJ,LYRFKfK Q0W\\[̆zanrWW=ĕ4NWPr+m)BRsw\J!\WTɂpV0(Ǻ"j;Hqz+zrW UgZ{8_!aw1-A $;X T"{oZ2)e6TwԹUҝ;gp-EF4m>K g4=Pr4\|x\W\ ί"J/1#]iK̉el r ]!Z9;D{:F2VhJ3+l͆5WWfGIWZ-1 \s+Dki PtЕ~dJwhXC% .=U3@a[ JtԪgDjj2+,T6tp̅&] eOWHWށH ²+KY.th:]!ʮ92trBgCWWP ]!ZnNWkaz ]IMJ6,E6tp-Ɇ1(U0;z 9Uc|7|KR;eŦ3AJMP/Ȩ$JҨɲZG8kb`}1ZRZccf\+ ?(8M- `CZ@S 38ъfDW|b}Wϧ+Dٻ%]FeѕaH ]!\r+Dk; ()n,[?B\ ]!ZycQvLޞAWUoN~KWC4+LWmjt,i@WV=D0]!`ó++ ͅ-<]!Jz:Bb\pi2+,x> :p3/=|Ҟ88.=BFfCWW ]!ZNNWkt02+ 0B+Dk:o zԫɌ |A }\ CkRSIW@e&2q¤VQv@}|4'F*Upu6+v^UJEdOGHZr׶ lT%t(kҢ+ь ܛ\s+Dku P /1#]Y)ѕ ?B{=DYthe Q*CWUoO4G7ln 6" 8^f(Ucm=]=ꩢB Kβ++L.th_5<eNj/tŔ| 75+υc#+439<BBBWVɮ=]#] M5ѕД<zCP6CQҕRj\~CTXaXRtmo]s5zDf9zt]!ދ(JS͞KWl phΌ(Zgƪl ZB: JNHt|tedY9#`%+QWv^]J՗CW<)9!Fku C;7{xRZvݚjtJSZɈȇd=Ԇ(;6w~Y͈f&BBWְ4;=VI]!`++I6 et(JBaZ]`-+1%']+Dz:J֦:CjJZ|&VgWe%;jS,`V 7C6/?~߻;hK0~VW.F?W\=Iq<^~tӧW~Ra޼4T= į;bjt1JR=p͛7ܛ pg1\y_xk>c 2^[|QfRZS7Z F˳Ťus~ٰo@8kK]vwKt,=zM{YyS=,֛輞_/'$"9E,Bh9^ԟv. $tJ gݪrp >)5ަ7.A1 R㊠?=ú1+-wb% w:?s}UL^.[x^S Jhaes%5z>-Fl~uN%|L>:dm4qWd~>*@AP%ƃYVJxeBbVQt$cG<%,{e`_Iĩ)-L%ʬbN5(Os؝ p-b^:_>m|5}qG&7 u}Ce#l>\HH Lwez=}8ރ] qۛp.zppѡԑ<]X'e3@ۢwq'bB6n![-[b, Q`ENj3W(Jorϱؾl9o[͖^@N7>,/m B)UN;=$(hڂʒVm9Eʇ tL$Ѐg <߷ `[^20PpPJNj 5{M*NA+2y셲ojt{'y` '}R&e~:@1"^~j4[bYn OY VMUPZ‘(] ೄjzVM0H𞤭M$OKjO,5ϴةma]uCvfތ h.lݳ䵀>zW RThQj KI)֭K0YV2C!פ gj8`€XPI$ SD/u^hwy^j.OD:zd%JQeLR b}SFa,*ãW@`CwIO*GiQjR~=&z5%$˼u95g媎J#ĦJAeahFDS8-TN[{o#Ob.1s0|:eϖy!U&C3¡b&xNz Rӥ$%4&%K&[_b u5(uI_1&VݿzqTmξŏ#~5oMܫQ_i(W\DŴ+ԡT&2N+W8RK-!,X$$5galfCO  `il%e1 v'ej=?(eH 16)C!C ; J/]utg/N0&ZS&!(bN@J@ԗ𝖂 E(m>d8.3O+̃r1w1>4PP8N_CR1 (7&2R -vxb6Ԋ&FP[7_;}YoK} b!PDQۖݨ| $o%c^'pXLǸi܈$E)pD+J&FK#)r2Xd^s<7@㻢bWOJ2$('t?qWz XZǭR P%PJiU;vm7ҹgPEɍ iaI"%yLU'J,:mn |h?i>=|-.?=;^+1téX=2yzIO-NG44rcZirg=3{>5{^Ѭ,IPcV[̜<}41S|֐Ձj:Y3%7`j?PlZ U\Y%-G'vfdf.#jGj "OUOţ.7o sGh0*52UDؚvyhv\\૯jYB JERBPH,|?qo.(.~2_?@㏯VH? m ۊQ38MU/0 `HW{ϰ> ~agõ*'ӫṛ|݄͆G~}00Ng[;7R1%s_PR`pN]=V]9(;8  >2%-MjҔT0};Sf|{MZ.j|hUXWPqԇ !t>M6Z?{׶Ǒ\e^!uclZox6,ʪ…O=i!Fiteg쪬u׹_?\LO8=_6 hy#G2{";sLf^ogs*7 ӤFv.l6EҡS-~קW7*df`NQҟ//NrAi@&:8j >'~T- `'SX隆uqwlkkn z^_,g'u@m_N|\zHk'W,3ᆱZb#okޓ D*¯,zؼoz>p3\缻;jl$o[hU?h_ 8mg AXX(Rjٶ1zRo\Jb(RNQ-il3ɌjzYy;3[lMZqƒ^}x ]Mikq$^C׻rSkh1suV\xYxLS nW0-V^6Tͣ+‡S}=oolq᳏U%iӅ&ߨFN.҉B9y (q͜(i?Q(':lr?ΉB9Zx ;\]~v^\:^C_$}0̭K;=ޓtϨ{'M,zcpҪp1e?EZۿ{4ee+,9pl"6 Km ɑc tC 3y)._3˃[s=x :~0ͱ7f>嗿\c.d1/W^-%?|l Z1[ӠP?z+G['s$ς?C|aCYuD]0Ŧ;E(Jֳ]gx6ϓ[ .EO^tw眷h9(|ɖ$JߝNn)4O_`.#Tp,@V㊹[mzZfyӬ[?|c>+;L"m_r9t.SKgGyda]ȣ[i6Wm7 G+1IJ'|bnau;##iOݗ%4 bL#B= 7%S Z>R5X6 -/+QzJȓ -2p`. qw@6q-aOpC$*h p Tjb7aR"42#{Y3b(ނ68yaѕzߊl=Նz&jTD\j @=$xh<0:/$Mh!رg{f1EZYk\ l'`4h1 攡. 3X6;#,Ht:%ppw/T@=@m!%L 9)Y: 9)'6- QBĿ-ȊD-҆%z%r)"oN+oV0 .pNicv QJFH^uK h<76:iy,g_19} xn54 dZ4nMH^s29" ěPa">#µf֠h=2[h! 
@P+}4 Vq :i#BTuLtta|X2 Egu/l 9rfUuT.EET=0&""x]X~5ax DBFI:|Fҷ+zG-DŽPKՀ\-҅<kq+vTu1`n4_#= nhڏn!82h-wxs~Zۃi z| zHv3 Fz7kDÅ@kd~ݒ>ݒO |,[wʼ O܁;SI8^:Q,鞕n2Ӵ,fm_1?j͵Kn\b}ς8瑧DgӸ!:X-)\\_Jix/{{[L˝;5-ӻ1z+Dժ0@c#j1=Ut́닖}W}G=Y8n"v=CYcR1!z2 1]v) |6q pR h}/M_XHAurmt?Fz37k d8Se^iYGzJ@-h[OSlORjSDzҗeGߌ D.G7t~~3g*o7?n)S/&y=JRbsO=a?VZy?fq>9ĦWޚ=8U>9mH ]A;l  Z1NǸWͫwZ^^fW+Wwh:+ZczkBr_\K)܈>@o'򗩷@>Ʒ77=TzszSOMQ͛f+_}"__n^ɣhνJޔo5OHb;ϴz\Kkq*Q>뫟v}" fq;9'~֗[GVS}Qi?ff$Hln7;7qvCcq5S|U/O?yK:s|C΍:/*7nAN.t?{gq񯂘a5Y?ȣٕސ4 Dw,9 `.;7q$qh݃THv̠Q̪zCq/xo8%lݏ:#O $}3}:0֟}t=aa<_M^5Q[Vkíojǽп{;p޼7w?Ļy~y;x/`Xga xv)Kߛtj6M[^waּ!.ĸ`GK <7v3kO|lhY98.͟yBvӫXimJ ]^fIXBqwj9 #Sŋ sRԣ_m,8 TXV 7rHBb8SXoX37_jm[HAQE5CtZuւD`W/ 9 k0klg@`S^@_drmn~b<+.}ЪEڦm7,P@^d^q亢;rhAV-V6J<.J&9Z-|*em2HXH󌶕) Ub.)-U8IP48ZeF:TBDc *\ Sٓ:i̜s2!F17|TUkHFUVN |m)KicPI.rfW%ZpRqcF Ӝ{$+ J%*=}YFi< dc¿F¿eR@ ^`6:H0Fs\FTuZGxLr1Mn"vD&b7Mn"vD&b7Mn"vD&b7Mn"vD&b7Mn"vD&b7Mn"v.&+4A z\i:kn=t=Qz8g>~y=)Ɨ S)fzWY67j"BI o*s{$فq:#fG,j-OMzZ9¸?y8;\wڗ yCO6oeXNq#Ug; 9u{[Vʩ HĿ ԻIm*j xӠ7P!V{wvp3~:a?[!龗i?t5ݨ@]Xw3-|*G0_Sv4b~He;X(\oSqSy cg^PKZ#^1֚ꝦRWH\b$&q5I\MjW$&q5I\MjW$&q5I\MjW$&q5I\MjW$&q5I\MjW$&q5I\MWC/Kj8WWBuF\mZc oB#+<%\uUE(vkEߣaQb?'Pzm,+CnVS>Ul/>[|QVZ1B*R\k+xiE'>$!>sZa]MW'6QbTW~|]n-E-$Z\(ǑW~SC.,e/a9d!IS 篇g}rݍMEoĐzj`x )fEMшlDWA+qfA܄Aܘ9"1Eumbc)˵x. >n>bS.Ɲӹ=<̮^w|SӫU6|g4B<*ܧ~x)wζS6i}ny 'a*F0/xa #^ˆF0/xa #^ˆF0/xa #^ˆF0/xa #^ˆF0/xa #^ˆF0/xa ;^ӭf;!J[[n5]҆/mmwH){ ft of5g;x3{W:7kBN7+ 9͜7>^Xk3B5J1f/KAlH, <cc٢gR #>:\Ndž;Q@k 'Y_Z_o;߸/YF8zWURی{v^#c:pp벲0×7*L|(]l$-Aǧ@Bci `5pU@Kq5”= etWQXLs򃑁)Yya1 (-B4֧f}>shv>eԳؕ5^*F*XE BhSUJ7/ mpp;-b1?o 㴚5 mW7pQtыXAzb4r .fT)M"T~r&r` Y/#O"$ 8M>8f:X/'*\`IRfүAojoԻń>Zq׉׺$](G[7_QiBSa/j-S p,LEH,Y }hG-O(i[Bm{ʽli˲I#x inyl0VqJ/CGb\D+Ry Scc옽mt-fBQ c%.ڒKaUV O]ܙX6d( &{eLV) uVzɜBɨ 53gǮbk7;ڊ5>㌼D@tbJG+])s&gJ0%C5I63e 2d BL0DL0(E.G_a,Ih0tM}=n#J7%CJTWA2 >%+/jS^_\~و_/n΍`;鐢8A %WdrixA{u\P܅KDɜÆDGpa 궦zZ 2xOzghv?rk:h479]m <(+n ͆496(t =mȇHauedL"J㡅0jsF N+d{Qhc[=[,lZll޽b{#764A늻Mc7n|f!_,i,46puWWӫ77s0jǍOnpݛx| -/6pۘ{(۶>ƐsgGw׳ko~wU_iUwvS){Qn~a@:բ+^֪xʥ>Kz-rWhqqWh]qW`c煣\f Sy{bw,s9uWϳV(z]g+NjWϝd~A0j. ?|LiC`/)| GuܔNk$W?1 UoZY`er# G qVr ;uho͓7!]8sm|7]uM/ Ka{r#._ƫ`S$(CnI5H^#-Sg{^~T к]Z_? S [Jnj8'd__>O a5VC$s}ʒ?^L}H%MQKh{4ؚkj%Ӂ8_>~ѡ` u&XCi6S] cg‡k`_zO !w%|=u]v]ڷ]:Cw%v] 3W뮸+ֱ+AN\ܕZ!wޝdܣʵ]Ƒ:Cwts?Nu%կ7.~Yl 3vɏ,c^ڎ^ ~ I{^{oӼ=_vӕR9(xc_tcVЍ~y='p_S)B*4>eTPdtw/}-ws%pu7:N{itَYFGzJ礐Y$rE+_dQ(.*A3{BԊ;X:TNK"2 _Jh0+cGy*dI6jUDdrfee )eUқ—1y_R7+JfxLKkL-FXfHm~=\_E-\_^/I-j &WL/ 'K#[x=B2H�}{렇w\tά3eK h%-ܜei2/XŠJw]t]V]N:Cw ]ZgFMwŠ^t]~[ Z)$3tW+tܡ ծ+ \꺻;zq]>Ty\pЄ^\%ph.-$-WIJ:j9 $ӓVO0q]&|$IљAz\z, ֘^ \%q)J tpgWDWI`.2(.OE!\ +/h9L\. HU (52Cv®g WqqP JZ3LWҰtxu3rM|~ThaW j:-p˼Q^(_=C n{$Z9kY8S,Kx; X%YPМ*Q**]zeD3ͽ1r$eNIF?Gj)^PlӤYMNC~AC_l߽:.^06Sw37_~ qj&+?}G! QƠ#E$v9C$D(C$mYռ߀Pun2^eD/|9mtt1E_qA_7)+lz|b o`~6l%r2!l0vh/H?d6W[#$z8O)&e\(Z䷲o+xMw}׉3 Q9 2Rh2b 8$Dkyx!g`kQ?g>yKX 0E}q~ǮP>.z2QHY5pZ0k5fh2b=6тx .JkXi\XЬG/򭐽l2\ UK9ʯΆCm_taq7nݫJw>xi7zw].LqNKqnތn2~7cqV/qIѿ >N?.6j~|(5B]a"R۫R폷|ݑDxM)K.{jc&QTϵrL`.r \pjW8rBj`Hj݄, jxT~8 8`$PproQeCmM݆ަ|2M$}l`kn-r_NO‡>ATfeBݒo)'Q J>zPY2wiI[UvԔ8٨6<,|BQ۝ "XPqT#iDq 7("*jEFd#VR>RLzSNq飶ěU`̙Rm#ckF|ְ58 EXHI'…DmY7T=L1jMEsWr3+Gll4`À#Ai0%Eҡs:FTȠ3 j-lR4Y$ڃ"#a:0- 9ZFٍv4+\P58j:<& aK*4?VF)%3Z p\6$Ea^hU3`b,fF|$('q0#4-akÒԏd! 
b[q,"v!6Njʍ6ȧ# u(S\# 4N a 1fQZadg($^2Xo(aK)-#bkF7.ym\ٚs#qE`K#@4'Ehkq1pA-px>̹%8[9;Oau4ܖsh\,#{,ty#g7AGCd?iQt$^V93pތFﲲmWt+8W 9`0ST˼^4 *N.& ?%P(R@X hj?ޯGbd_1aݙAZXt8ʐMwLIOyTN+,Kws5`\q$.rO\9}>AEr-'h!*;d;UΥqɍ2wHFJ31.*yFe~nQG؉lK)L%Z "k?TG095Qq1 XTvT_ODk{_iu&]O;gnoa)Vъ{I|lo^u*@ DI7VdȡtyUz{НbG2D}7 d%W{ռ,QJ07\܎ v^qlkYW:WA͟xuA:.՜7^t{KⱹZ((VK-%>R Ha:7MwQ ~oL^wa* >܃ٽt;W\=iջ5,9>ܟVzW{rS:*dE1`8# :vJ{i 9cit(L(89U2t4*kc&eRAsglrD`p'1>yך8-xlذY|vml$)]f^1'b)Y,GwÕ-krAIX,R+]δTbAF2@n#s]3 ;JUf{~Rh0n(^T B(fWI䜤jLrd1! nQΫSrR:'|鿹} !$.P>r]d;XdҌ_N^ʹf%/-FJ.*X EX%8e6ugB-*R±".jC O \i1'J]sDQ3[)U"(Z,f y%L X‚wDðqe`hM0ޞR$߹nNCzW'n'M5Q$%J1 4jxО]>ϏV|iLQDR5)S .!TtA PxO;3Lo99򼠧C!($#k8~ǖQFVG^'0~Li2UYg/^d[PhˡDn(9BӀ)8 ӛ?e&IYSgQ7濇mhxgã^`hOip>sEAnGAJ g" f1SEBpm߃L˸Wռ*[~ݚCg[]~NK\Ëʷ>m:S{hΛ)\?ϦWUTC7o L l9ޥ[ 8ΝFR\*=EΆIqHjUwˮ U#S=D'.uW>T&i?f0 il S(Qٺӧ[lr*L: ?XCN;TѬgϥ[@ojʊKu> Yjq?\_b_l0z\"|[4Nx|^o %ůGW*Ajm\?(cۏדOtZ[ak*.(Rni1~{H s`w{s=JSP%JϷb:.gہfJ`F*3C yP!vy%i~B'< ,SU$[giHIɔ2رՎY9낥AYihX)Ql=N;haDEJfCmݖ8a*qquz;3˕>޾S@hݺضcy݇.GZ[oL{Չk>msCKF g^1Ͼ8EV3k, 4W&qHwmmKy9{;RwWM@NN@8vpv Rlg}Wɤ.ԐFp3jұ cK&=n'YHfsf'\J#aQrG0"#dgȉagELZ(dFH9Ņ`P!xSx3DY uGmyEle6&s,ѴEd H2 -)4uVoqi 77@rG/v;Ot,eN%-\ș\ 2G8 2sx,o5-\0ո/cWJ{_# '"YF.F$GȢ;XOtg$3exle}OWSDa1Vkы4p `0(1bc4K )w#n+RKlh1,ȓ0DI+$ " )Vΐ"y)]P)x5<=׳f߳'ؙBS?[ӭҋ¢wZ9#ɏi0|u8LΎ(1|? *|8^M3-x !D&NɹV‰Nӥ}9^7%42@cɻĬR8/4nv#u#˱ub9I#N~L_W}y5 <*ܢBaLIyt5xW]D &u։ Z>>n/5g/no.Y'f_ Ciq>y4<-خ9Gxvu|rpGi=-- ͶrsKۚ[6>Yަr`qZB&Ru//@ߟ4M4|Үm[vi\7\BJʉ 8/Hx9k}agHb{H&du$NKшZEKU$mtd&!h|w6ٰ֞?phY(E6y#CNFHL9*|J[\H^LPlj8iĎgx8v zN#v[#vuDlU}qvY?t k'B}`P .H<@B[E<ٚBJd1+iSXG6Z -X.%DfaL6r- Em=Z"5S2T̠"RbM8[WW0|iixN+k2#K6$%ƨMX$ 2O 9z)H4rM @Ą*g!Lje(A' xT F؋g}WLĜn '7$Ec2}|e+RtYF?!Y0F&k\&'A`3JE#xZ@6m>$R.$GuRǮl=zOtm`[K/fgP<;Fݸ;pRPIm>*)or@N&\YgLdqW(Ȥ>SeHqKC/g'\ *e bpAD@O)#hjjՉ{ˁh,q3Q߂6*f"Ā!\M { RKM۽%w!Xn Vupd iYXhu@O+o#;gom2h^p<`ޛhؒ}x`sޱr6޾9}IX̞iF4.X^4`D-ZEeB|VMoתWZ jLɒ „$7ưV QzROp6J@J.#ɥȲv6#WIA=$M{M H41jLlj}n҃k}=xyß˻yϑ YÐlS+a`^H)0:6tVDЍLgD07@يXrl+Yqde2SagkSaޢ0a܆JMR9RH%2 c> IHΊhmh% tV_(MU ;wpݣ] MgltX8Z=r\( /dY(״»SAEo3yI:>[>o.=lGQpM7acf4҅ Ry*rWᤡv6%Mk8TBOlL QiURDe7JTec}f-n#qAS.-3{B,3OMXGFr$awYg5~sI}hПc6?njPg镗RhsegI$heD!s,R L+`# ت|͉w8ƽ"uFב. 
d [Jו-a]e x=91V7-)co$˫tRRi d2G1auY3G |JA0 f# Bn+ώD>}x{bT) AO>R!DF f-DT@{N?*7 ÃׁȂc<): 1ɜt&F 2E6aiL*Hݸ]ҊTPtXN6b0,DGF#z JP_m]^Y,}o:MK}I)s2 gb\Y"(Kr2LWB*2Pd-3]M+8r#9h]W|LC< Y*'BQ)mԒ,XXcD>^|Xg+wKncbևBro3Xŝ޴/ݦ滋4X>QݧhR?>]+'j6p26 rsW?{7?'[E6;Ϟ ~TOfUW4m9HE=#fmWRS#Q*͕GThbBm3W謪*YI]A G}j01ֺ(#0NFn*6jCuiUhώ Cpd5DDQilw/4b{ I=ҒŤ" ВNAf&DƳ6f&(p6$ɜ4Pa}flQ w)OֽXmNWR\}+qZ yH%LY ΪȣY{88%JZZf}yX:V3B}pUjԞ`k̢Þd&]mDە+]mxdWIxW\p3\mػv}+0* *"k;\)=\\ .\q \dsسW ?R"t,EZzcpVkWW(,]bW$*Z"iA@HWW0CpECUWUֈIzzp\Akۙ롭 TNh :p:gΈ+ozr35h̿#+Ϡ, ' \~{~+ t ZwVtIZ[*kOz~=0%VЦ3pUb(ֳ"=\B2JR+Xۭ \q\:{Hizv*,tH` (l1BضUrpuO|XUǏ18ĕp\z%%g-bπ+ծka vH`***ҚװUv:n49/]Թ}WzZ%]$~^yr^NO8,[fF^>f1Th &S9*Ym~rR4Ps߽JWQweoï_ן~\hхw"g=4]h=cytGw %= XM ٻ6rdWyYCۼɢAʬӥH EBBZGKkwS /Z~MBѪ l׽׈Y7WJyŰ].|r[_1C@GQ _`7_<|<7G09QHYc<%W4}OQq*jەKٵʥ#U.H+&FE \,F4kr9z`E\uvRdb_+\KkғBn;m;y]y1&Wk~Nt{eKR" #$O.a$QodV*md(5X;`Ҕ1x}=& |YƂ SzȬT,fX;n ^vH߰hdrZ֝~~7][t5%Y>lJ̲5lngO&kap >&`XT6@(v&taw: ^ϕ^|;qLzqmƢ}ccv{̷I.>^uޠxF,UVq<`*Z}7h!Fo *`rf>_BQ4nP)T[i t#; t#YֻC:J k/b 9X^,BtZ'#0$)*)]2?>:SR,l$mj%d'Y.H\+-Tkmx0<&eI^$w&=UϭqcmK_dH /)<3^(۫Y\npz֛Mz륻LXZ<1sZ\!;).MN\$bupR\2eP^Ks?8?ڲ*iS(.X DA<: 9"dMwB/vBUby.쒲S+}ul=URB/*R  k/HioBx/H6IӘپ^KYH@h^׳m,Rh +$BO{S6{yŵhE 1dm4üZ\ Μ> pQ(ΠQd<E-ܐ*q{Nku]_ȟ:y#ao)G`Rl=9w%W>/4C:7Dc~3q~8zXW㓚n[ݲ [2u- -ec48#Zy]*)g._8|?_{9T ׉Ea~Ӡj[6Ӵ׋O.ioUtZM݀ۑ) Su?=5/nGWq$-HHK!EclBTI R}Nv.,k>(Q*`CJFQISLQOȳaPȣ Q} ϚX3A"A>(:W :Ke"&fCBnӫEaNRpکhɇd^RSΈ53 kTBrRĹ-d4O;&uD"RVQhGdtU5&A#)3H RAo7!.i5(T2Fk2R\%% н("' ZdSP`кŃg+uWa)`FXUTTYB?1'Qs=KūB1IM)7֫>gGW#,c)^o@>(j)r@:Bױm47{'еo8οh Ի8h^1m>O1oOK@C-@/ Z4 _{}}Rh,yHȂW`E 9q d+e A#JYbz᳐AQ>YI'*h7Q:!b+Й8s RmB>/:ݻ`i>ivw LӇjYJ;=: ѕb@}mPXG9&j+r]^dhSpԶ8%J .R dhHRBw -.dRw>9qP3LP ( DZ2u&mt[6.>~qʦhcyݹq~7o>6wJiE9"0T|FD^x O+o'ʻ@o_AJhiIi;1b7Oofi=&x\uj3 B@!Fy{Vӳe5 kuɁwB=S\,dS9+LtF=F#&0} 9b/ m )A2JPQ1:eK3qF׭WP|mnx3yrVZ_Cxmف܁>D^Ջ`}TmW]ŵTۮ"z*}oڶ€윩l~Ux3"G Tkl[H#L=J=3{sdFα;q Az*Bqb"eϟIe|4 @c12Ts'V KUlׯ$YSmS{qRi}qU3a ݟ:y?n.!NX"iKPiP&j;l26>8O7}`Sܑ5Rl60<=e~ -"Gg-)h)v+l<1x$! DN|H|y5&XE0w S{Z6r< Cy UT%&aI`Eug3r5'OKz<ʥ~p>۳&O ke([VZHVCʅKb+-xCZI"/RLghe$""#Zz(vAS* =dT>ݓF7Ͳ'?ԅEnk?/'˜nz[WNS ~7nvz3-%%V$:?AGzG^f3撼-"G/ 2l2/+A Ent[\5ftL i:y U6bn像2f)6_j$rZ]9Ze~y%Wazb.3oލTg5QֲYW.z Zf{TB>P7w6\~X4]}Ӑ2A1uuZ݀q5aD7׎Ƴ-u/s7*]<򧛂'Uk)ݼ Ten16=Ȯ~CsWmeG>hcbX9tef5FV}g?k$2SAxT  R tJ|FQ$I԰=Bc'w"m_t;ۼd)3$HH8mx4z:rFu"Hm.İ;e TlC:Lua繪8J*㛱M3݄fBi>XqtK M:zԱM ɟRHYU)MF !_>akäs׆ N En½mj͕t I(w^bB%swT^!JH4& BZ#=˜ڥ t)V{}3_[)yG煓NtHUdQF4$K]I,9,PO81 )58A\m8p8=NFFq/?LN t5K׵^%+*034Hd-[ wT=\c,KQI U¡`8^*<:B hc0 S inKMW XQ,RH:O42h5X48eA">]:|3yKHז:g@lXδ0'ZɒĆk{5&Uݿ/YN|~Cy;6E%wL,8 QJ5%X1ċ?"m,[>`C]Zi`-q\&;$Rxk9gW/rm(jPOmZt)\TOdy F8PXZoq zj_,>͟U$v~r7 X&&]X.ϋCmlu]_/ɧy?uwfk{ߦ![-diAօL[Ԋ(;jKٕiNT:H3IA U-ɝe)9kdilHe`S @ :QO[UנUem6At=]GiDd;(3)-Ϙod8FB*MQp5ؾ?}qh"Q(P%㸨IYK@F$KEUN dVbZuf(Fui0xVkuhe#p}p~˰v6~Ԝ>^c'`ɸx{ܜj䪮":xP*8lY'ݟAkyv?Ts^l8ʱ u=eܙQ>Isqpg\s7cOdQ|e8AhPN(շq4<ٖW 99tS [sl|%95ri;Uvztyi/'eJ_3BfzZ6Ixwɵr6N^ګvoN]eFyݰg ".D'Wtr[ ؉whIU@JJToޒrN|Si:-\Il8Oަ+I/J@s% ,_Ir̻h& !2$Êql^0ȉ`(Azwp%+I{ԝol=2sUg^gn-U7Ul*Ylycs}OV_e%?.қ϶T(yC`t6fU!,|/~j75W(j^)ɆO7!^.6!{ó|Kō @x­o!4?/kj5o}ysn3B%e{܆}3azs$9"$s$]ٮhΑsQrzDWp }v%5iG*lHWTGtU+z2ZͻNWR(ց^])c=+X*h:]ef/]/_ƹ^3~p%9/]Ԟ: 蚺{ЕЮL= Phw2JIztus#ʀ ]!\IzCW-<]e t銣GtUk/th)]%pڣ+ 2\FBW]reЕTAiU2ZyuQ°" XM{QP7~e8#NF8}^]ڬ#_q~}3|WP9٪9.opQ_~sL ߳L{=g9WD\$ʢC$1(2\, = 89j!Pe׋Ck,$ԁRa0CdQi-) .y9 %$2G3iiA9:2}frj؟OQ &lTRJ# syr4@BTAԜ4(P#QVLH gQr/L2y0*}_ZWӉ3[}={ۍ`;ww#؏]{hUpUyeW. 
0gc~!=Jdr =jF6 jtm<0ʶa M.T:Qz* :{UA߾3`A P12n9.uu C::ߞyQg(A͠hJtJvM|e<$E IZ*!F3 B_ YuD|vabwQGFX"0?Y Gj {1_L`䶯<ӡ441<\@WŨA5Q;u׈_#w4kb'%()`rNsu) ?.#"I7M5>fڢE VLp&r-LZ%Shxѥɴ΁ra5c.Yl&G[# Ԥfr yeR3^4b5 Mwm=; G[zT#Ѹj=MQThDULBdS)θa &MOBx >!Lx,]Al517 $QFzE^b,-\j}V{C2S stlΠlz,BZcon5g9adDp<7أxj8z}Bu;MXn:cy/T;v^?@ a;]|6;qQrc;.L:ɨe-U=ZGU?;XI%%ر j/W\0[zR޼ĚQ͇vtJՍPt2CXfBԅz 8 ǐ*bypE[ ['k cco£7Hu|\f BjPݎmq*851[ ΊF$98[v67*h "3\ܘRDL-SI57 rQ2H'6a6Qd!˥7d(eyqjlHiED=ܓHȪjQqtt^F[:iv좝ժ4иdk5d?jo?X6tuy qg!^{!ilӘ&JRDQN Zo>>(ɢdL1W&/ާM*fϯ~>xɷ;kKqDnc_@"_ &&ۿlhkicMw_H̷MJqcG'g~7a>Vzx|O|=1͚ҽG_ [@SV 04A$onUA⧗߫'r_R:TMEhLũΰh(qabPla E0B㖑&?w4x]\\P?rݬL'(e>KǬQVL9I3c\"@9i WbRzq.+k(1,Db>~Qp6Rxf{qKc|{/Mf4]%n/*{_<^4#f(>8º\±)IpEЮ Vft]p3p`^H {6W/nӎ.&" tQGb5~2(1 ꘺3tOͶ&ZcXP{n6P=Ą%(f\[އ>bak4Z]n9wwTKPmf!2+IQ$BvW~)RL[P-m5LsJ\ڈq"rމ֡Ehєp{ilEAoI+R.I2LRR^!)v5.A_k>z- ӽnB ;n?(-FkH*)iE_ȰVkzs@|Z<@o{'7Y{/م/_o|t!VW9w}g+J#-Ͼ^mi {[% ;'!/b~YQ> 0{ #p7 Iڊ 41#V D[-ߖD;}+ 0d>]m>͗6mgEXhI[<:1ow6.PJ,UTZz>d_b/|r@S+7U#7+VۇQ).YbtqBhcE DsywעAI"^B;Ih@fV-m%Mn%T{[kUf/wϳiư= P[ĿkGx MJ*/٘ :6ֽ=gд|bX%)}vJ=ly?W;Z qUTxǿLv[D>D]l%ru3gA4e~-V_ p m" $D"2J1ZbР~\B/د_-9^cs@:KFiߊIgO?G<7,W Q9ubd]dw(rkdur*f̫c͖2UJobd7Ezb>tZ VBK%4ӴlUvֺYu614=t6qc@`;5 5犊=ekoQxAIኳ8:Ro8B%V*ZLs] $ \M:)s9. ;? yfq(d@HGXH:K(w6qv#!@01W#zlM$|JH[{c.4B ڸ|`V} cQFn@B؁Xw c~gن kpJsKŒܮ~ųG _c*iT`fqP5oF`;Glc+a8C\#O$|.ϯAKhq wRmª8.pbs$9?{Wȑ ]/T^= =^t_ly(-E3EJ*^RU"[}/"hI)其Cӡm*Th`.+Cë*Sc@L;V.6Yh⟃~aڬ3YJWlnj",zlTgôgzݗJсaS)G9ܣjBk9'/7!ŠZ5X_1({P4k CDX} M%@?G;}wd_8||O/Ν>_W*p\O&#? <}M?ݲ_z0 =>وNӚ(J';^/e7QQ |9 C\T|}&wl Euy@yˠnA vN[ZYwivfDMZys/ـu4%> [ yϑRT N!mGnD<@W%_KF(z < %E `x>-ۛF*uG~luj6Xp8mݙlA#Љ.ͷīPJ e|=u#(ui'-DTj$;40ZZe쵫Ǣِ p7i ]I5I5o:u;'=Ћhf?"ٛ]ҠfWQh< w@En!+|w X8 ;+W*K? >t/~>~D A4ic"x 2N<&FFTD1jnj3>@Ano b] cD*06Ć0j8JN5V`Rl3#Z޽}Ӽdy`l z~yr!cY T D`pPMQ]cZk5[ "{bJ R5b&+ k_*O'mkYQd"hVXsºKIG(ky5a bk`MX ZJ߾Ĭ.|GebGcUŃ\$WwQL]]ϾxCeעѳԜfdVtJ-pGz*Zo6kV[UB@ZRlVB4[b9]Q@KhK}Ar2zsZk5BU/A|(x>}-|. $5F-5e ~6ׇd7|}\VrݻeB*Z`pQ -u}2Ku`Ag٤μw*0އd)sHsB3"h#3eCA,te4_/gSpwqe,\<{q'~@ӏ^t!\s(Ǐ߳>b3Y7|?G?q@{}{_NТMf r|qnKaZD@RQo1"tfO +/l 2*N 6m8'Z癀tƗJLtw(?ӛ s{j0ܧ󉝘娳 #fTf>yb6j=2$!J#+a>LM& g(k3ݍy{{TqT?RޯCj=HUJfo)+BI0n~-XUoh11l - ?/INHgSN._7OS\E;]}| RMְPYAdcљTDUqhmץTBgݧ;)ia3Ω4v{{ #4x@+ztU:պXKs=[ps=[|dW{|a\=;Õ^:|*YlwJ o!7ꐺJVyB)ڍPɈFdfWw[Dkkļ}Ps5WzF̵twQmKkQKk5SΩy{jYR";jّ;Бِ"B[n.>VzpsHk>j}fkI:z4G|.6A0fbEK}wt][bUmB7"m[@KEf4QlP$ e0I!6mL#џ7ﻐbQiVp2K.G2M)n%3,azup`>mĕZP"!ӔJN,]Z~. Uad2Hd, Ǥ#|&hr儀 iEZ[HtA@Ar}#mϴ:Cś2Rң3%8Q1dcJSQ'4F<3>߽Y"[Kx(Du{?7_W+Zsl'EuŞ{2˼/{\uͽU8 Z2$qcp, :AqCDNqCp L07QuGkKY P $'^)g)yd"Ѥ7(Jsuii~ޏgpՖOk2i`ۂv(VR{O/~QB BPƑ;hTe-ވ Y-?.fhBP+7ؘ, Gl~ۢ̅2@ xvC67yX(RkLS]R#V6mH\RYpbRĮx7_)^cJԑ( @q*-@ttBY| JﷲJJu%;^e5$ص)ȹ4OU"ݍU=$? rAT)DCZ^l38hpy ҹ66rܔSدzL7j8J0uS;tZh+ ?VXDOEMŗG~WԨa8<_,|hҰ&F]|R |otP@y{/+9-7n՛k]c`T"1hzvk%k}΋mb$*KrEQ*yBI9a &<%()\k*ҡ`lfIb4?1RxԾl%QjC1&J2' d2G?.ytR@"GhQz׹Rpܱ~ÑhF#x)s|Rʶu)v.l)UI\r.6FTTr$KM攢ae7Mn0*J*!2XAIT(AT +.-`+A6St5n3kfָͪƭ iN8(0IrS8Hf\hRb|_姫`0HHM3hE.fESUH`Ċ KDDqssEƒ+bbj26\y¾/Raߗ¶.mvPeK1_^07AÎ'r<^6V1Ŷ` peLI\[i64)S S<0Ca7zvDTO(x%ݧU~U#؅f9JgyA2qG2ʩ)(e[+A љʽ 8z((tU "69I!2ϙ1q1^#L3̄4iDk @ Ǽ"TZU(o`4:^ITk xjk1=]0N1(БSuΕp6zA~v{tk&4'j@NQ^ۂ飇 Re/&^[~?i6 7fUӝIK^L-Ύۍe~:5L'_pӤ|ͳNj{Kv)#z'1,0Vx;coG ջk)V;CjMX@D,nA"uӟfJQJ eGH>w]nd^s yO$: T>m5а=:QzQ~8(2U^ m%t?5!߿H2b0zjLi/p+ O~Xc'l_番Cn=@,3o?ɀƬ/7EICyIt}mfg㪝+^DJߢրx \}2F9a왰ᎭR!? {}>}ŀ.;yܧr*VD"``#.)&#Obt5Mt*p;ִq*5R(%CGT$$ٻLh/;X@L)XaߓL+x[_2wܽڥ+0fK PwlcnĞF<}{rk=4~O&xmmQ>Tsf{HNm$=D_M[|v{ ϲ{%e}kR!b-g[cONbWϽ Y$v}wԭ~nNK`. V片[ly(k]q{8 ǧ $cxTmtu.,?-tsrO{bRِQ\> u:!^#]a7‰ {#[ ;POԤ6! aB?͠zy `(s[[=mAKG58E)'# S0$T%&Zo,Lw3i^`$mV*6 bNp5d}Rib#xw^σ2qiL(){!*=x5^1=ڽ qJV2&=Lkx7. 
IR~_i:/Q:v\,x̰ī{+~~GV92|Ż[''FAUw|\o6wY?67wQ6_QyBÍ 0=Ǟ=Î7m{΁@t3yQ2#]n1o7NL\A~1;J@ʏk]n&U(~]Gj~mUxs:l=v3P!<䨭 g@e|Yoµ#Ϟ! юGsH|iR4tßʡn{OzHib8ONL8ȇ5{6ɝvz}SXrϭh>S+pN.i-m=zwwօ>N=A>8zPa5<@*ر A/C0( "Vp!u7pch.B,ayu!3[u7ȱV[1 -(]qYہhJpq1k`"Veg,"fު\ڗZ/|xuY`f,zu`o. ' '~QU> {N'ӎH+yIaƲJ?v8q+zȩWͅZ_\2c hFZ/d=Ԣm{=&Q{ Bz~iI|RaD}U7KƩB~_C:suZ8:%ӻ7WH9$48INL34!4&)D3Čpퟒ* ~.Rrլօaj&&;ĬVox('O#Cd竛87h=YbX/{BMT{y>UZwD*A80PHE+m#ImOTއ\`VRq !++K"̧`Ĉ~;˻;"RЌ@ ɧ2dB~E.wS7W\L]|ޛi.AD||6p:/ɸ^F Р Wq/A<~fdf.3%^1͓̒st~#a!.O8nRaᜎn䌬 $po,9]߅٠93=u )+>ʭ-#rgK?VUmQ؋&Rlќ; Nn S@D#X) $Ҝ K7^h_HIjx$f\V?`eC*+8R5xMb/AM`@f7+&@rrrS~r3ږ~"r-`qEr7( /R9ƂMo& ž ?b.uLnZQgqՠ)!,JBSTđ, [ /寄zU'*D}θI}) Vz@/Z y? u–܂:^৶ ?gܬ>4n";&ggCI (*6UxK #+į IV5=)ܿ7C7n[s9smt#"y*ê>T~(*{PӅSWjAX3HWC&PNs&@3̓40k,n|Ӄ #^Q/i&XMxhq;'?ҾNa-ROo%huC,|sRbZrX}d5d8˖T JE2knU0uKՃY޽'mSLd:"KI'e{6j *4YHrjö[ ''i:Цҩ*X=qjZ-(F::Q08W &>k꛷-&5ΐaeܤ=XR?gZ$,'ͽxdO񊋺%$/ ,6 oĹY~84 -ֆkV E]Ps6HԸYcӕdm[~=n6kc^+̿Z!+_JEfV˒~/uChyak<% !Q1<2h)BjUaݍS-kPQ f!m҉խ$i֏+ˇAjKը rGÕxm9lƌ9pk.i&^.A+k74D;!I@[c_1w6ڗM ;s.ӀM^>oi|&*ȾLWՎf,~MhQ&3j&X*$bˑExˇ_* o)ADer^m)VۇEhU܌sS =U2*ĩ|6wsɾ'_z`]\`f!}LDR ;ZE2x#nq;MڐҜ(Di+"?&Iaa1WNF牡Wmʉў1JžJ8\9笓2<󼸊<1k=-rёQh9> bnpLK=EceGB@x.&<`0٠b*(k4>Vt 9(-GCqrw(WsGϓh7bhk՜F5ME|f}c%B5jZ;rn6"UpUFYc#}$X#bM`ZG L]`/h}?EA.^ڍ>FuS,)Y"I_3N| ԯOL 5~ eN OIuM@[1"TmUۊ:Ebl?*p RܧnΣx|d^Zk8a҈xU!rv?Ň2ύz YMO\ޖVF'w_e\rusżbZ"@^|^3|LPĸaq/G'`7JTOP+j'5U*dW]j^J9f}mQO!-hkpVuӢ~d0s8"̄AB0KD(F-r6kfsA&Y.FΌCg2}_?A웙gJ-xxJgi_{sqx?_vs w_<Dʥ |޵a$4 $A܃!KϢ?"ۉ|Ib4]{B3#34gw,~/`lFKǹI!ULՠD#? l,~rKENʢTT$w|^K:0lvMi5ͧ[ ܴ0t1^5A bmB<'`%STiQhm,gҀn`r"cT;$CnxzӸd9u/}MnG)>JA$;v?}TAP߆S^(.#.>X/ &-:JЌV.']Vbr&$irK-ӹ'?(3o>m?}VPD"DAHL`Bn쬖Vp!K_ފ`ds}=5RZdgbKOü@ڙ~8:ӂ >Xa%(bKjy]dR$ @jf-4R9|!} VBY˾R"Ms>htϿz^o{/"' 6%k<"9aUUS%=H/&[sN]|.f2_DX9XTL{̱\lƑ28ܤ3~NI^uq:@YkNڪ&tH4:xeNҪ~~ ɋK׼wѠB)Ns},PrN5Ys}:O˳n!S(HhB]HaȉqE3A"ӿ|L6%jo IaV쑽gc/^|.f<9YLaL|y5;x"Au'}-9G$Oe8Y< D`qwu*JE28 ("e V L@* p"hO1-hvm(/mV7O`<o2&E'TPJPŒnjJFTṁljkwϮ8ɗ d)Q!){P%b̃k\%ݿn4FQu}[u| MIl(@QlcTz7'r@!R05Nu_G6?Dž_ZMf*5vm]lGbw0Qp[&̓>0Ơ!a& `V6痜hTeXcWA:^!݀sfE$Rς]b/8h͗b`_X?ڏڱl[rP6eE6rxf|g.=gW^j~Xi͂LiV}u n Y2d77g g5Ϝ~Zd|?%6;oE#,jpCV7ӱũfέ6X ڳ@"^9 @-M'9<S:AËۻԸݫ+)-@EAzb0Q3}3)­GYNuJ4sZsZZQbN?~NQ{4Xk /5bw*㝜7 V?V'h[2{硿V98 Z!1o; cIĖs3[@0zɱPAǴtZg0*=Gȧ:[JW)gQ42Wӝt?\5(]Hj>õN D1E0C$6ϝAb*agn~Z~I*c rİ<ޛ1Hx pA$6A'tжviM{hm捎ܮ_$K`a&v6T Xn}PqQ3ύCd=1{N*Z=p/`OInU $H^7msU)""hcP:4$/>4&a{⻓WyNZV|3,dMwdMBZIBZNT%̓K!ՅP?ՅlЦQd}/?6Xڥ;9S/QpjXjERS2D_p%H'cҁy]Jc^2!儒Cdmw ~Y_9ӝ p?*ݲǻPUn6=YXF&Ǥ$}^TK7+1F%%Fe2-au'^n6x 9uDDwB;'B @- Bkq: hzU%KP/cДl# wzUp짐)wZ>* 5E*t0>&_ H0u:oS 0HLPmH{/;BA( me&2Na%NMoz,8]D%:ݖk"sNj=)dv`룱]}; Aia%B6:[ZA1ڑ%Za^R9Kl)p/29sYb lOݵ[md}6` 1 cP `S,#˴GT*qOڮh,oy9>5թmT3$j-cl_NԂq 5kAEA 3IP\+)p¤}{EJ&pglM G |XCB*XZgWXK YL0Xa0/PŒmlLǶt+?Y0Oz[EMΡ@2@R-,tpA5OkCBEwbHEnj;T#5ݔ~$r$}Tˇgrky7Nj<9lw]h=L g-jh ޏ3y]ywN/bǨ( }_^̢hLkU$C'P*'t YE%MћI6s^Lo+Tɻ7fYR4hN {p^+oVY+ d{(_\/E=']PL2smo2&H> ]nsEU;uroЏeHݸ/f̶!Ntc vNniw>7xNX~yiHk-[RW3د>&|h#%p3Dc<:F!yIa7B^'ˉi AC{SonI  hFnWl݁ui!ai՘^{zB6pp {_Ǯ}1z;}U?ϣ 5)~UdQJaJnӝKq?ItJRJ"oܤeNSpѼtxpzk;8۷|of@ @!;wX*t8 8vI 7+C l _F'H:Fk1@ˆkki30 4BS|KֱpbhGPMg%j rbgOs , *T@\ WIk=q",ʼnr/ xN!DJ\chC bچ3atJ ES,jͨFD5pd2%kB[@5E¢BcA5no1/ϝa9WTUЯo2[]^==**u_(<媢L.ktqEWnawh*P6^)QǯȗD%AxWc]/ϔx~ZрxbD~tKΏnJ'cge9r9 "`"V@ENP1ڀҽe6dղ^5w۳=4^ߏVOw;"CpG/Tk}Eͬ[pz] FjK$HP o<ẃt" dNiq߭b}3WiB'PVF!PtJ!!k#LDOO@~hB6҅tKr9)Ql#̎Y#Fz X4Dq"9="-1B!uG{a\n.@f{'/)iVIm)bM'JHj ?n[bծV_B%g0#ZTfQJnγo8(B o0|0߈OWBDv`3 .Z2"EC)D9UH]2oD9g/4@ĹDupbLk?I޺!rb RV/,aޕ6rc"%3w0\ a&0*--Zr/ ߇%vigdUɪ$HVu}9\΢Bc f-bmoCd=Ij̷^T[e;#gZɎA3(;f$t;ώto˙Z]jܹD4S-32-aA>CqH7 ?OW?~n!'lH!4R\W #AȕG; @()nvc UEDewǙ׃&LhFGae$՜aO Hwu8hyu ΌY2pEv((02rMHvDrp.w˳>oZ9=T/N{:4D,y@vfx)ݔ)ݔûMmH/3 Mբxxq2)G !=G`^6 N & 
$#E?d#$w7/Q5]¿lk[ޅtBB$mtw'dkKQU_&ZY2hC!^.'AsZlIȱL2zĖdضS"s(2]:{D.bJo.x=-r=˫#eOh6׹$aM+wiPyWOHCEU#ۻ'V@]k2i_6nomÐQB,;tJatn#֕BF$eN#l-NTژ_2#@8N]6ǙM뢣qBA%62m>8o[dhB1]ps库-6NU';@ N7l 'YG_ѝ/ϴ4_^0[ qt/F/{m`Kp /ȵLRdi%%~L{Ց8|N$m0!#t"s`ss1FV0֒N%(OEeK1G <^j -!f|1EDΏ %DQP |jʄN٣<!g>DTtNW-݆vMX-:ڭK4jxj#̇cR6~ȡT3:A"iՒm?g&:`q-(@;,qDTac(ssK6DtF8D;YCdEJ'mDF`H)g GNֺgXe\# 6OŽDO:/(;/PLgDI YVX(iŁ{ cW=JwG 1:y}iaW=K1"8tZbvY Y{q!&1QZ^bE;0RW7}B ғ- AHJsoAQgX 6\z9* V; ?C!!fK#l'\/B8`oVgU!Zts|w(+Vįw xx|3~;-'txζExa9$]뺤n,vA҄#F+[+!yEF Eq %a$ϴ"0 ^` XN%%>rϒ%INڡAJL7 SY.Pݒ%#JLI179*dЋtѱ'J>O?IEwj !#`dZ,'%U|=eEI^~ *hOQdlmSt)Eⲻ,lnSկ|T@|sQIz΄T S4~"<%ECC'{DeMtBp%rm!vʡ/j,CJ*S_\}n <̴So2Ia_ Tץ¾*e PO"bBL-1/ ,,PXS8Z,˵``* &?|\?.WC{xq. Țc}9[n̺[~Cr%c&+EL - Pa~ΤPKrJ @v׃@gj⯃#NV8' F,NJ %5EU,r A2+87`$AM %9Qܵk )Qœkk1ڡ)cM' @2E.[8("Jaa)ɥ˷@ Jʪឍؿ{C=*,x&u$,ց.B$jBQCvf2MWhG8dj^mH7Wax]abt{*L`̳Ack Kw!@; -"RlQն~'uKpm0d+y+d2oGv1ƈaFnRǏ qzNh˓>~Y =!<# Pzj0?nro0]C]@稔u]E'8X ]P<r)6eF.a<\|7TgGMPa{ AȊ=.+CB]jjGZMͲ{aDU"h#+]ar1[| [gc5ZQNGLղx!J) IDL1 $iȹzw3| PZ N,lXa8˙e8ǂh0B U)n$߸>!Kzټ<" P iפPJT5T G`V*ZPCࣈy*uyL;ο]TyԺ)o~eeӃqꞞxbQ} ]tĻ}FNr&f^>7;Mn>K}GBւ кWBI/fei$yG=.PVׄ`>ɂ09ԎMY"0DD`5z< RY½ѮG vIp lM&/g7-c Nw~c闣5uLlK"^^; P]w؀^$+K s'"w8>f۴$@ٙzgvRs0.}~tOǀ ]0ǵ/AY&;]Pg]a^և?Tz%wɭ92 "];8$ysRxdܟK]mBGD+>`M&8S&,F81ǕC8./i¤t^t uwQЕeʼ~Q x!2v53t-ұ5Ey=B=Crt=@=OF.+IәR4cHɻpg{RJ gy#]Y#]2΀qR53>8z~rtBx``;Ejg{@7m3p(2EC{\ףvZWPO&5eM`0}FqnFU L:^ncRY~30x6%ƛe7Qn?fuX7f' y]ZB^j!-$S4ENZ * rt VEB0Rc"@Y/e>Y\ܜq1 LrafdӏN3}nL_F~` v}@m~U/0L*釫r9X=h˴]8wOtXxŻoޫ*Ta.:FV0E:p@PaݻE#GN+:ͥR I$Ω smdVĭαFs[U0YƉ㨑pq 5tZk,[\ʜyKrk8%h %$qM}Q -( z&i7(pKLSY6}NP1 S.`dHxB'dH%pu݋8rCSя0s0L "̵HD-?AfTrę22a J:8BH<VkJ*e'jI}0ʤpWPDB(Jd f@4㮽^.( c5U{Ocs/oSۦym幞lԁ^C|C^w][\0~:gwL^ٲ:,2ydLJp̢o7+O 7eYn ~\ǀ+?L^z|K+tuQ +^/wj٨?\GMzroEu1Rq7U7pe:.o?3ߠlޯol̘F_[ DZ$X17_qߗmfH/#&I.1eGr-#5>=6"1xߔ"&J/(N&PkHCm ɋ"֧[gST-I~vP{Lgu5mGie qSDcntYNKqEl3Ilm aᷗ<,,|޿gpKlޑU{x<hMîWA0?f#x9l F?43{Tǐ6_UqwӣeaǤm=9;Zgѝ͙k9hj8y &qNܶz;Nds6]I@֗E`h؆K; '`!)ԙ-ddq]`6Vvf]'Ƈz,lP H+A4Ъ r$h;ƜwpϘOû7 .\L r_k#NxHY'1Xs``tLoDz7t˞UbXYo#bH?84C۲$mAvkځHaSp|Cr(O6Kdc_n8FیuZuX)n^o_1n|D2D ȇ׌@3vIw:0pIr_i˿uo[$@Ugƫ ip NMâȪ`ǎ8HŻ]8?oh'UP(V"|vWWAci</4p [Ki$\KS'ʩb&B`i*1S@,]ODqq& NTBE֊jo!iSbyڍh֦l1O)WŶSts!<\HB\ 6J]N~5(zZrh=R 6O6@PJ]1%1ײp)7~][c#NT5ΫH0%Z0??~{V.j+=1z1Z=<H_;,-oUϳY8\-`ou0h.5|urqvti7IWuLY >F\!BZDP!uGոoy.0%{x"#LeD1z:Jl'Tf.6qo_k]a=Q\/?;+dK֚H H.ԡvҩ2ڭ┶j7+dK֚"ysWqHqwaR[wũnL}7 +dKΚbOp8) CзN5jIxkUd(A_bw#Hqo#s7w#%wQwdA8gǎwdS$\$toe?4b~>j}sL0D102"cJ :h*KSm')ir}T#nH9>(QZ+(UɌ@JvTbFDE~)"pBl`]ʞ'MQ$ZbChW3O`T\dΞ֧ +eUf=k]xJ4"Y'SCҮV8I0[V^}Vc^ڈ8Τ@(rݾC4. ~R̪-I1E >NVWNhh{[]V؏* 0[!,?L!Rn1B%i^ V2 π-{\@? 
/K$XNAXs"!܊ L)`dB4RD`a;ݢ2$f6 >BT Nl1v)rbSc8fdV:*C:p[QƞT&úGlu)o;tNʵh!#Gf N6C@*;ǜ>V:CAr HF$>1X"u[G ' (P9Kp7$(aTlK )  |*6R2 "AD"$$$,!Xz1jB=_ObJW fS4}xP?v |]k"3UOatFOT!Yə׋py]9oT/׉x`llEbWg}e|K<|ik-"t %c!8L Da#(fBS,X;tddyL]yu>@%\Eֻ%^ ӳzg;8i(/V_kEoZ&E7ߠ<8bVL"X}1Tfr!DI4 %qL2N s Luegzl*+ SKTZ&qW^)eӔz}ԗ.} 0Ñ%gXmo0 $!/nkycOK7їpq~7o}e7U<'3v"PWEPS$xp"-%Д1A(N@BA'j"R8ܝ~D*I%1Hx #STa4"%qGi+9a1 eNZ00d鄫p!Aa4q()̻ͼLJs}Y-q(neflAVFV},_o!#b(eIһv{bc\unk{OՆwwe$q7U7pe:.o?3ߠlޯglq֚~Ggjé>¸Krk*g2^i=FNFAf6D/F$[~3^H#RPh}u&b ,WS8ߏSjҠ̈<$:=T~iG򫰥_}` R)(N0Bt^ :2G#e&Μ$$!vb-NA.r TJ,{=`mlq.{==tbMK@=)MA@>YWnF}bԔ|%9f)!)c%O4R)z ?Rll'(vKRvHQ8 IX9?xyDÉ_zs|uuF`=Fts!nBj%$jޒgD#(eDz#"wgռH LN$ۉI 4q~ Ҝ \m l%vb LX).kOnY*edW[}+v襰KϵkD5w\b&fH4+}Rfb<㔴o3&V C'q\v[iW7 NЯ^DpL5Ra=H*yEU7% # -z/%GP4mLodu ) #:&teДCWSԦh9v$5(qiAqpBkEuRbAPN Vr;lLx4K8N\(S +:l `SVܧ2T'cw~-a3mpbDʱ 'C WL~}1(+@1sF"#I&~Yp)><tuf ̷$}W-6,CCa!hjjHGwF/- 6fT')tm+!wRx0]y5:=3y+e|}MDV,6/ leIXRʜpm;W3 ӉF2R2/`+U|=Г2|bCnmOu$ܖhd(<㔌'5 | ;],Z  umx@`?2>^:p+^v@}|F>SS bP="3MO.Bt[bmC"%hA/9~aDUT\Se)Š̵LFbh'bG)Em< ri8ƼKk<Ȁ'&tNːtp%|-lϰ0Nv7 W14׾Ւ9)y&KH`x'rdK1e;b}X* )"֑u-%↉͡+w?9B5 Ƕ 9B4 r`6!W&6 q5cs},Y<3c&q+|,gI sN̝֚7mKn-E̅u# S|Zk:~:d#&x.~WY"WVȕ.refep9j'(56!=`ICu%BPҤZ_w^_U!XC¨|l{hҎ^Uv<U g4Uuo1Ɨ_І(4-PhClٽBխu+.r݊\Yw*a )0$:&WAp&R0 ZXccqԎ4gL0KK[ !\@Fbl$#Ģ//J E8j0۞JUp:X ́'qKF+@ɢK Bd [jpL+џPfu3Ka㘀)!"P`d;Yfe_O0 $t蹣L ?Tn@ !4$h"^ 4*(Hp,]/Q#\Ɠ+;}b<7O1[ϷoWa2= Z ̻_ܵuj!gNQa]c!Lv ޳ͪo ~{Sp4UᴃOi[Ε{տ3EIgц"Q(goWקRI8~. `3,䡀!Z{]f(=x& Й Lizg6yLh*nBJgH͂AdvBXKBTf={Kw+?_TƽFH >*URvWGF I%FwW͗[4ݸeҼqx;W9-8-QG! j{>ja0+iI/,MM:t֗$fK%$ئ|+Ih;W (m;uW_ye-Hzp&ȋц^y?MdZڟpTI>=M;Bs"\|jwsurN%S1qG^psm%~p_ɕĶvte|R^eu ɨze2p ӅMI>|hjƃKك?"9}aWxFZz5zUDofem'-*|PA _#{^&Of2{5UVNg†XЄDNULa,:rƕsV]hM0YJ=Nr{I̿ $A(#Z.h4A{LNVC2Q`9.2%XoP<;|# $\VccGÊIwE ~Țsʖ\a% QWv$IJ$S1AD']sW)Xa 1'Zhbߟw?]Nj3zu&0D*W.[)!= h¬z0>T?u8ɷ5A@^^zq@gD_ >,tOC.W\"wb6vVn=VT|fO fϝ ?}r0woruwV˸~8ap~C02Zklm⧖[k8۬὏>{ݡ[k(^ i{ (teد[k8n 8ȏz-hwֆ%&-KIpiv(%v-K!/8R|o;~JNSF[ɕ' 5Q/RK%jT?-杨}KAJ˖!(fL HyȗސMTG>_&!n{ %*gdW(<톄S+d}git&(1 a.ڢmvpm"GA7hщEڄ\2u\r]x2pp]Al*͙Vй3a> | `rvF NJ1j~Y5+SPǔЫxQQ7N??i#R^f]<.޷;˿+& 0C\rQ+?}(ZҐ*ZSjY7Ak[F:m!zk'6띘ٝ񆣪9D-IL"olq)4Yz.D"dRzX |L'!mKnib-p)6*+5DnB"vka|GP6ݶ锋r[toB^Fh+]tgw1.&L8r.'½& jM\Q@9:3 vы4huvё$L9:3AWw'ΖUc" .cw  r( .UlYEWpмkf~ )~:@P]#kЗ L~2Cdx9(O78$7d[IDzx` S-OA<},A pE sI| #1 bČbԎQ"tMhZyce?ci{N`#j$ #AxjH/3jsQ'MpH8|$1Ep셪 M4i3 8m 1=~s1zbƁ8j!q N8%<Tƺ!QZiН4ydwʆQ4n(9׫)@9`-`Ep.Zh .ꖝ֕.v&Ģbm-sèM o:}kZ7] /dVY"eSA4Z%T[v h9Gܨi Z `t*@ivq!aaO:Mܞݷn/(mzBVT29/(H+閗c^>CDxyDs1D<T['ny9  ^AN{-F4mG{a-@/xQO &բOO}(Fj`TRk*E5V<=~x.IRv#fPpY?,5DzkQ걯F b*&uCsBI%MwPiS%# ҍuʕmF~G r<´%u]M4wUԌ'9aO3z{<XUVγY_9V9]h35uWVFU٧Fn"ﻞ^k"*]Gs/ѐHrZ٢q"Xn*FL\z/MxGA喅\ 22a5y)x/[SA㴎9/X7䪄SjiߤmU#Jq9FDu9̱NxueXb1&'zSnSU9#ŮF3^_Xx_m^XTim(gw_ Q5HꥂY!d%y<^S%A 8,s)39]'y gA"m?GDaB]_vVfTEmIQJ&HJ.RƹksVZY-~ɀ$5긗URɋ~Rު0ddNtҬVB鲄!헃?Nރ=8 8\setl nz8GGx]hVx<1~h5zvHO*iy>ؚ B*ls ʬ@$2+MZae l.~; mJɩ.k+8eBiR>?0y /D7xBja˶ VOm/7mD ;-3Q:Y]NCSFkͳgc=Mnc|J9c:B\0MxdNnݤȳ?OnFPIQMPܐ撇Ņ*~zlzbSZ(G!T3*Gq_Ƀ1N4Ԙ:{lqz "btq%4x,].hhޭG:ͼL ^=5Q>[!? /ǻŭ!;.Yz/o?tnn*c/^_psӅwxm.TfVc4?^ٯ~?[e^E;V#vrsc]WC;t^&O# 5+$n0/\RL!"khvLi zkG[_ y&eS#޲Z9zv nizX |L'!m:ޥAk-ZP_B^Kh׻!Tbc:nC :ew׻nQ6u*┻!w1.&Lg Ө {&P  Fug1Q@_t$QQҙJat$泣9:J<?1* פwKa6TBIVAj[$Dؒu `b#PFs'0 Ak6w?J~Vlh^INeVD7'{p8J1`QZ0Rfj|ތL;55)$Y 2/tvi Z3e50pe.k:5C=X04~x+')cSNK6EiM(;hthp‰-Z-H%RT#MĨ("D+W;!0zVPU۱ }3Ty}(?I ):2JPVP!UUQJ.N\bjX |bRm0=٠S,;-q y&eS{ntbݦOt:jۻE RX 7(BY~׻Qfɔ-/Ӊ|Gw\Tr-VPoB^ԘrLD་w1)gw9 R7jFoҙ<[+)Q 2:) EG(3GG/":RDat9:3O XTLeؐ1*ݴ ѢNCMzh;է*$M.|0Y[B|Y(-efFcbg+I*BٝQP$['l%Ľ֭6Jt}#;%U y;#AprAƨ4ĈQs&鉣̢m87x͆jJ:MeDvз-۔Xww(ԭm5w/~A. 
0|~}s7߿7w7o;pu9DwV䗏 gBwֿrO~qޙ3 .Zx~_ 8?dFzjCᛝQ+&b1Wľ#$mb€iтn}SX 7D\x7Ix׊|G}tihA[%nQ6E&»bc:nn,N ^[D RX 7P w1.&LD*9& cs9vW9:J9Gr~ё:GGstt&h@uvё&L9:3ATiQҙ@)-7,  EY:oF -`01Bw=VYowK7)k ㇋?/?t\?xankuӾ|}u}վZՀW/#dDFxְO )WT[Y*)[tλ-`"5 qҀA}xRxoH!3'X6V9Z-P#Y(.قI`U|!ᵧCVdI `+As}*7VEx{t½,x.ăAE-$c| CVuJ3Q5?Vž}tӸpam vTSշ'6oFZBeߴOHZj\7/WBi)kFb =ȗr@83Lrʀ v!GlnY'){6 }rAK e8 b5nĭ~vY)ln,a5H@!HĨL/ -!C*z2`V[EZwpJI- RL7]B3BDQ^Q VJI1h C9$@ ? E//dy68 i'¬V%^Ty\o)r9K |!hgTʰg;j+{fwLg7@ vh[1t0qozu]a|ºH`Í #2G(vw5qy&~~w"巼@ e>}~wTwŷ6{ bԆAA N[ۘw{νo.~[_/nS=Y$G(({)>5.~[?c"o_Qe`?~!}(l04za;biQ]&F8!|By`ryo& e;,*asᰎ/KVB Uyӗæ4=yc&"He|Hp[q /F7*F˭Փh$iIEYm3O4N3t{胺V\7K'э©5Dq==)m=HZ[z4Tӓw_oB=yv۴xgw{RJФ8dGMvb HS'ҫړ G-+'D -&2~t kjd\$Ϟ=A]^< q22हY9 &H$(!5@MQf=K]$m顛ߖqUq<`1RdGЇÓ]: q=)YVټV ]y۔b#fζN'[-DOQQ)mݧ5Ǿuύ QFzpJZon&ֈPG9nT Ҽf]Zns{~[wwS/j9<~+L7kS ӽe=Ub1?. ޽]DU%U~nTUV h&'Vya{V0g8[¶nՈ"u/`WbeX׌v}p|0masDrJ_-+ӖgP9  ҍ֡JrIYƳnR6F WbOb U9]UpSX>A:ӔU{<Rʂ00Qٔ\Ni:?3ȱ nRȒiw K!uRo"K!WOWa* (>ጃL:Pxȡoj-qʂ INF1DL]Kh\TA g%K x'@ydo3)6)%eǀȨz F8h/ snz(Q}w>:5jALEmAnC (kDc $8L磿]FXR^]#W惙xյs}|^FEYh:?\_L'ŵˢ!y1ci=KS홝-/rM)|jeDШ*I}GctԵW#[ y&bS2:?،n" tR$8s: tҗ#[ y&dS_F f*tf1|J! FUA4a8}#Ћ[:vR mF'kyC/5Gky=Ap!XK?r1'@ ,'޽ڟj8U{I-qD&z(KbegT;mSi-VQ];ιqXJCºj0BTuÕ,%VsյQM.F̸Ri*ںKiiqӐc+%-I%ux_u112*dxoq:<|Y8`!DSl C$э9[}2Z I|G[}U^AGR^`!DlJ+&`wSec:hE$ݭF%3rX+7mc@(sfl3֎W_)tbw)l.:hNBo6$Rӗ~XO[svބuz9~WλaP31wAb?<|a+c1.XmpQʷ\#f+U}y8se8A܏'㥽\H>~q]fK{5-`-2(!b^E0);( "M~Me0Cte0ܧ_dP%ra Z˂Y+RX^pj!] S3(Pv"81i'+"@e9<%+BS" kŚkQRIS\Tsn@-ur/UtI2xF,*'3d@L+=B;Ysk2#}nӃHx$DaT/QKx!8մH(d`ah%ټ8gЉ(U2"0 |LxP߷&hٚĻϫφ:^ ޿q,'lH戊oE1N'7)fz}~T!QE$"RY[\k^VQ]k*~~a"?nš;mꞕSa>.<[ =ͅVzzs݃jx?Ť<{5QwNb^pyKCD.e%5V#Fbgf>n1#3/*&oݥ$DrQLCȥ$Wг&\_ZIT HAf%rdFH6葛,JxEp<$}g>/}{308^[}1?v'ybWI=/.\hc:+d:} ^,L&WΩvE4zX$#v]L^p<2᧑l.9ZDlxB,dk 1V l8D2QSi<1`-'5TŘBɒz{I2 if?ֱ)wRx$ -2`bi?/?xGdzr5߹0Vs!\s,U557GTbFk4'D$5AdOo+{RD0}; =7Y+"(0lmG {rqrdm GgSg#QascuW j\G՚Wzslj&[60H?Hg4.qAe,H!NiE$a5ib(8F{_22Xs k׉z% W\g( 4 zӠ7 $~7/W; ųȻ۹._ˍ5w.R{_IBj1-]_ܺ'T(1emztNS}JV ܴ6hBӁzȐ 횉D_;e GJ Ӏ\hgݑ ~.T0]ՒTT.y-31 SJ"%9&!IT7Z{*v\h1u.lc\z1.rM)ftERec:hܻo2-rM)J_qnD7Fa[ bL'M[Pw-B^mSq1Kwҕq3znۛuE3a);A)jc;Eu&e Skڦ9X&{ H$F#\v!U5JʙD;w?E2O7Ȅ YD*z,lQR]%?z:V"M3^8Tлد6PgnE}xM [`rj,I)W "F~%w Q$P/ k5Z(R=\\.&@Y~D>DLR6"Rr7oݽckݔ0/ h;a@ŖfJFZ0P5Yg=PV ժQٗ4ǢT߻&—!->mV \9rG Go)3G텾~r[laa|(tu9C$p8sHiAQ'Q"Գ ټ5֍To: %p >"JI*[h?e%-+PĨ8[o@\ E2 +{oP5a^m21$#`anIh]@Sӯ5ĺ?}r0|M,iA0gBP l +njμnF۹l@ 耖U$s'Q'}LqV)!Ĕ *}}K)5*8(Bڲ؍$718:Ǣx+@ՂV+.bb5wJҏA Y$QR'@<Qۊ YɒR@J9|t @Ƌ%85*4nH%"n +"1 d4cW֘*]UseyY4ɧ%v&r]SGre}>-9a9+Quer7BD'dA`TOo_'.W{Ry\Ff<'N9o]7N[\,!w.l;@߹om9P {tyG,%:"\P1 /znE m H:0 a?0仿oSVT -KlqE+Kb1eݴTJLI&(5C̊ kէT;w\/Sg˩ Eߩz7g~>MnnO[ Czx>/9w˥N"W7S=;kTu=h6ڇww/{֡޷1DYy~5F"(ݜN%3 +ƕPia[+?A\0k&(J rIs1EԂk'\3J^X鵊pm$=.})5Z5Id;OEE垝;ĉq_~4PtS;k!XWΐgy{q;7DN[&9CvFhA;lD7"nܷPyFTN{Ś*+TU(B|]E7Y>ݹ?n'#ScFUwjhr+Z˧;wdtQɘ3K3fY H~t:hU8؎uA" EB"EB0qC^j?si&NyK`,ۀvE>Hz,l)JcT֘#@b^r/>}q)"O׉Tcv~W1\ ץ;OBҞPcEEŖ *jk%) X p,1Ն[x7XDvs,ӎ C {)GSBd T*':\Dhv~ }N(s%rqM U4R%2e2GQg1IN v!k1_Lˎw\ 5tOvKNu"%TCzRASv*BЖqG @AF.:ȿ(i`88!?N2&@D_3fI';J \֊K0Fue4+RTUdM+eh w"tcG$۾޾*R,5%H!A(-Dsl,(a5=‘2JR">o~ؑ}u{.v ^B޵-7nc_ae&3U~vRsy $nm#ɝI@2A )Twۢ` `cc טZg F8w?|UlD( 6'}qDԎ2N.j|]kw6GJYnuD?p QS@Sk,} {*P)g:Hw Ԭ^ﯯ#9ѩ\>%L|P3vj . 
B-^ۿly϶a43ԄzCiZӼykM6oDte &X.@!*k( 2\%\ &$ܤ\.LR.OW AՂ.4 fֳxYmf o`I M%qfĕׁvoh FQUɜX;eEppd*4 1 @4 #%"AT3X*5Id6Y)ZډhV1#8Ōv^Q卪z̈@LjԾ(LwMM͙`Ŕ T% &!JP!gm !J"SD%"!1#ŔI-`Ԙ[ 6n4So*A.Ai~&3椆wRCK!yRjaWuI a\@N2=8*5uIIXtaUdKWjtUR8E÷8L"heTC;*c{6H)%iu/KaHE9vܺJhb;Qb|"A>a$aĉBcB@{D"yAIBنQJ;sYшn?"aHcʷi8" Hl~F1c0q@0@@ 3]Pp -I(G+R&ȍan`2 V,#wKP"BI}ɷD=/$tvSz$wq "a%w2m~`mTj1n#ūsڕ*Usa&UMF*U%= -UpI<Z*L!4#]sT"N>?ŧ|1|TO$CfaCThCJ%0eI('oqyۑax9J/CJNl\gtlwϣCYa vid*=7_7s2HYt/KW7\l)%mPOo<%>o\j64~7qtW37o?)hҋ},qȉ'oOc<=Mj%ޓQƁR6a?,˫7?/u=?p_t#'vqJ!»,D^ѣ&b@XA (ҏ-C°3wa!z|A6E֢#Dz>;62 _ACXYy<or?D7A}3]bc[x +Q=I(MuK" CfQ Epߙ|NsL!8kJbƓtH"'ЬDZ+oC/΁IUkLn K H.( ]k= #ć*G8w<^[mޅ: ?fNR?~v z. F'AzB߅}qcHz܅}I !4K2K+vqRIUm+m@%n+R#ܐX: 8$֤eB=J6p9E\zKQC 9.I-2ΪW:} KLjS}3 %[s"ĝ;|AbK [H-7$z"" I Zi"QrD0 y Z$nDn@ϼId Z&H#nuD,c"aԾن۞3'=d-0^cZ`Ԯ9UMs| d7)<`WzImj;1 =d ah_q ڕ9M3I B.h5o$k%I~]0FВL|\E)ПQ_ %j1ߛz04Љ1Og[+sk~nm^gI0C#F<HiJ$\F\$gW.pXԿB;L98J{#g{^9B￿˥GYP}-W{;t |=Y].,q|-u7߿{~cpV_ϟrW٘=%#TvBv* P? ]mjJJFb E Fu!j6!XV[=NOy}6JE"YO,@aR]\B1^1\A+@A[ T5 ;|TSG%-vU9i#vR "NʁqW&hi zUGkmPuePkDY%*L CxGa?^T!wW۷z2gmNR@囅,wfJ>m*Ks#!y2a0o|f><OF7zDdq0HE\@|:S:wO`sF6:'$wƱA9y.Mᗥȴ ȴ!/j!XeV͆ȒT|`(ѶXWNGhtR`e52:߽֞jYm.㩞2K8yml8+_F0jB7d%o /T3BD+T.%цw('ݢwT-wkq(ean#Pޭ 9sdSEͱL@)]׏8_GO]1&0ߏ P'Fɽvd>-xcs|Mptfe3ih[V􃋝&$֯?s0B&gӳʿY~\k9'Xs:DN#91Ca5^IY c\ l߻eRh:]ɗS<{|pۉ-'gy禔(+5OXtF|?!DL8E|߉7˜O'ȇ8qU$)Mea+:'OϷ8Y מt dH"Ed(pĵa~a4]KK K[nf =7Z-˧`:{v僁U-k,/bYr:~,Bv Vl<`I)}4bOdrD`>vP kE >*P?}|D<߯!p3qO!seP񗥪9G7zvٳ~}PDxŠ\S@Hq֔T0 ^$ ,KnMQ~yx> aRo$A '׵ vC߳_\`1ul)KWC9hrM\_x#XCM0 Ϳ 8 ARs (8А!D׌‘~WK]8 AqDBiP QDŽ#%pBIq ̧f%9FAIql#_iNPS۩2 < ^e%JCR==MTꅽmqd6^g#aIᘤ6At(tjI$&A(%E U2m-ey*n9_Q?Q(:|Wstܮ4_iזyӛ:DF_(|W3*󺺡gc㼲џFA:w(y{{y9^uOO;|T6\A锞6%R&oW|i HUZpy<ȍU5r`'UZMg=kI(}O՜"Rm6I .e&ef] ($c#>Id81$BX2h]LٗRnD/|M[QґN%hML # ȼR]򜧼4ԍB$VrEd}nfs 2B8ޯb~T- 7>W6/0oY 7VyLE&zQ3M?z{Lp0q~2{O- 7t/YwsXASS7'O߷x]z&ɯ-=Y[uzتClO7ŧFPٍQ0* #QnȻЪp&F29}T*EUb[v Ԯ ?PӋh#D24) J21BĦaքqD r0C%˵*c I0Xd:3 .y  \19z.:Q\Y Y~2rvR)Bh( C"i"$ GqhbB-*aCMlZaגG9sŹUa3E6'2bsh]uPȑh"cdS)r16[ExN5 IKn5Pȑh%M0HA~JIҭ* |HE[)WInuPȑM'M s^n6"~[5@MABF#=X[NT*_`KgkE!tT jgJ@ztʹ=E6xGG'-c5g7{e_wyNܜ7l!=kd6Y)S\EcRI+vC RVtImG̢{k8\MzVIV E`<ۊnRAlˀVlzAgxe[=<%%Ytg^yPgV:3 XVvF ] qOHWoyA8O.WKlKGx%hZGf^%7Gxm./6&sc=@})fkdAS gEF%]&8J x,t󐏷3ÙZqƫLpVzjN'oTlXrGd}Hn^MH "xu/4Z'rt2,ߪ+gN˯"CBl熄6( eZe%趤TV/Lwl";hOT[|T&Ojem]=m[8Sʱ"u+!~*+1H+/ctt'1h;"d;yMG C/UJFrY.^0W ڪ+eh%l {K|iaa_CO ,=NQ4J9b`eu=86xe|_.F+r\ˠlArе"#LHLw 3g笣||9JQ;::/m(xÛ?zp v1Ki @dpw31F.99LtR(D Cl`Ť^/6ٲA1LCVn1(օ^wme_t9Wt#=oj@W\tGPi9H^'9H}9iڬ>= ë `+moQv|<_r[`}[F"[1<n) %ܔ0MNx?Umt8R  ~ײ..CoC%s+ a4_ cf߈. K0d9O# \2WK ~ `]Ps5|ڇ( xGXG{CNʗٔU-g M 0L:FY?zg-$D(-;ӉX21Wi;gjQ:u8ZuK]ial}PZYAVfQNVa=M|P%P&҇'@3Wdۂ. Ӄ c,)\no=/tww^[{) Rw&Z7H*:*Xʼ:17b²7R{pQ(~ib؎F߰MZP\y>N+J م{qf~X=wc_KtǾ`kjEbݲ!HhE.+5:x,{s -qXo}aks%toG>v_3}^Q8/!5v=R=Š;E'Y}"d%~p&.]M)huԎӻ_s{gM]h25wu!.=bg"3=h[{%Bݹ{cmL"f1{i Q$3KA{mO; m\oG"%cvT>7@n{,C6:דv1`,2?y8MKf'iѯ$;X\B4ALݧz?HdQAH% u:KbQ nʾr< X2 ~yg`~};^v~ms6|NIT-e% ~'+В%pn{ˍ{ɂ@|-d~9qDp)ao1Uܝg<w癸;ϋ7ЍzYE0.F"\3ceSHZR)!M-LHD eyKl6#K'd2'B -<$o|KJ# r5"Y, \nOŇ`T^4A[ AC0Wdo{' YhxüȆ0)ܔE dl2y6ng|&IrM6Cjdi" bŒ@Q0d8+QGnM=#8r612N0ƈh JAS#u H(r0N@&EiU !4^*dT#BD=8YD( *ĊXğMr$ʄ4&"55T#W`hD贵d6XΧH,CgsH?$P9Ɲ ]%85jxDZ)iebc '"-ƒILo"E@BUV%D_ O bԑF #UBQ (9զ8"d$( C[tGbeKP9_oo/Å_0xa@&.YUĤ-7_u*֟_?/_j\Ѣ3 cK|KA(H'%oo//'Ӌ} OO;|d:~NlO-C @oWi;b~OL ˓ m -d$6d!#n F'0d/Dc̐ dD4NF*!ͭd!#tcTVq,0I㍧V7׳L d,ұ[]Yoǖ+P}1uf&F2j, $ظ&M5)RZeK"STwUuUIJϻ|b53~=~×r~RQBػ/WӭZs_]Moku~9DP"xDx'@!}BĶ^?0UcmcID?U?p-a`zAg]n]_bUM-U'5s/#Nk˻,%lһFjC+ | g.YC=wv89(G;Н[˹wmj@ +ȝ 8􀐛#OrޅK@{d$06vqD]|vݭ2/$/ >ݽ7Cp ɗItkCN=7abd^chFsre@Ɍ&#ThٍRD\Lg4#zsNҺn;6ݦvp%;&i~+Hq<^{A ! 
"IM`I2 H.g}I m 4Tw}ZHϴKց$m5Da2֩#0OO @ԙwSA玠 %JX́Xu-{OXA g}G:(/dKz"6]|S乑bvMSvn6o?b>ۄa00#n:{dfЊeFhh1@EIYl_/0ϫ=h8g@Du3f-{O.^N#;Y:ܨQ -(`j:CC(<z'"op\WGCk,/΅7Ѯ.q~Y BܒHʽAfpE_ERoƧIh)RϣN7ɓģBAtx@H$SOi4['}\>nIO(HG`/7I\Z0A6dC(Òx#`2?ٲ})p\PU6 Ÿ["JJvis,izIn_IJ^PɦGK6KĖ"PFu{Et`Az3)BxP@X$*W'=ZZ$R^M[rL%fH?gĶ!ך?o>ܯN>!gTe:WY& s]QRƯ,/U*cx#zƺ ތ(LǧedIΜyYFQ\Zw.ߥH li{n ,rd/a7 rNt0gżVgaȊt[1ox1.LÜKr)Oɛeco:5!]ڠihXVwYq -, ))OC q;V"xD, J:iӂ"E%\@C..s ?܌ jń@//~WT@ I[ 4$+*(K̈ Hs%fibԯdێaHڠk G+yZQs7{2i'~M-j6NHߢ{ͩxԎP %%vIan;TJҒ*}\q}0PFʤ{H%)(`egzF0W3k:"ɦqn_G̦9tOA{6xGɲ, */IO&0(ق`v0)͒Ujᕘ>s'h bvo&{5mEnF ;b"$`#[$'hؔ-EA䚎@[4`kH`#V_r\N9"G dXuY)Ѭ"Xnl ~lZL"\L:#,ӈQ9_Ee@9{Dxqđ<Ĥ KdaZ?-nsᮟ ^F&,ЄPPQ;/Ј%ٽk>}rpB;Jj%zehӠt|dxFaaɳa:''^ 4i qwNz8(2Kq& uwqaZ/,0hݍ@ 8}LWQ0!4Sc1S7c&`#?W.%71K;;cDe~9O\C('WI((6 @q)ۮ48"#WIk<]g͜;nǗ*JRUn >tf!qhH+fud㐔g9$!qQ[{B8_XvrI= a pݵ%ۿ&qXv;vz_/'fk57M|hEWW:{)RT`)xkm iFf8QD7B{| a'7zjO|[X@K{۽\gK7NoWsko/@$5bX!$ "*\3* Rܸ 9Fc UmgWjaQVZ|VM28Pzn}Ek8#3x=Dk ,TʰOr6n ZV"@i_JdDDU|дTa@Ċ  s > 8g "Ki0uY+ qJ6J]-5* nI˸5 Q&e+>& DYn`dq&ML%8ZHT0XT"]AʮB}'7{׸Ѵ`$[2Q V0PJ+…`­T5X nUlHf^ RFYI'#΃rƽ/v6n 8F퍋[a<9ka[Sz5Bd/04n;iDh,iOLȊِek1i< KV nS&=A|F<`ޗW?]v=Zݼ[G_w3蜣[TV\[Nd@]%n%0PEP G3`"De`-ٽh<ѺE%=@@Q ܛh ,c _3bTphNQ©ȣHB@HpH:44*8=h>@ gy ƌ̌ 0Mٕ/UZ!IMK8L2j* sԝI*Fgaè a71xPaT$ {@H H~e迨 #pż3Qznjcp:]0&)Jc(`(}HcEED{?M 7#N =s۬;}jý]̒wuI\|P=Vl\^MwaCF &X*]1"EM/7oF|ج>YU4_0<" ow+{X5ڤu:f}%M"Fz6dYuo\Dd7WԌGMJNKn;X|YPMemݼ un!$Q/enU^Fd00\ %zlCEX7O D[Tt͗Pg@Bq)G&_5ᘎ\uиe7)1~6IHմ1\ iZ6Y?P !zr Г0{?3z_ \CU$%)>]t%6=}nEY9r3Pdr1fޭ!]<8=.scEHp `V9' -To]F2\"$ x- BnIDzĔSX w1"TZ'jFs` 8 I<sIG5 2lg K Եgp2.2 D2%ݒ`,[aqCZq/ W&};Er(tҠ*N|J2 L_fѽ{Q2t-ګN[S^׉)PnP Ս"$CӵU?}w 쓾{կHsk}(R.;l*bi'xGz?z=Jli ~_iTo3t|o'}sC'?N_ݬ~ǿLVUzz ㆢ¸6ˍ,;7档pCQ~3r/COWdчz8:[ԩO{y2egQaUI耳7"ihd45C^!cu𹍇ˏ!ހ%qΊoK}Mvgo /=S*F;$T+) "Sȩȫcr"K I? [8Sʎ@#%]u=d I  /6&GidBp,\d,i%LtF%))P%!s\`&dAgKYG8"QQ=I#&L֐vqPpKH:sR|cR<@lɚhJ|0S92a1ڑA#Πv"M[5Zb`F2EkF`{s9]MTK1 4޸ ZJ'^Ym%אêr昍/Hay0}ROն>G~Vbn-zS ۇ(c?x jSYF`!E1p Dd4"_ypQCעDZnS1@ݞl wc DImJYn$Xi.N+f!ЦtvC@8 Y'Lz ,i9SHl/C2 ɔ hEw:SHx+C "$<pg `k7AX Sc!^$輌R,ZN Rtpmk))F-Uֆjʌ"Su>ehq(e4ƟO>FY+E]t74 2^fɂ ͙9O8lDA#@2N>Ǥ@`Y؈boGpCKQ{77hn8+pQݚwY:vfK~Ss\pJYil9KfQkIG%Ɨ%LB2P@GB5Gx%+i#x Kh H@2]E%R^LI]?ri|H O8 I(L'dT-<)}wayhZQaͳ'_wa  n+TuUVPAMa  $#;Ȑ<&P6'(+ ԣBT BDjVaT9td"{bD*{-2 G#a@U.6T2y 8uȐ@DUc+3=W€ !mQlA>o_g[zC I p%VWg's$< _L(◉"dpS[h50Z9!ǭlw'W7K?[`B#"GΠsJ7BS,m7G%X k\2Sݣ ]SG)jq9y3}o:`Y^^.6띷^Tݧ _T[W=̾;h+)HkDvXK*֤+hf7:` ERc WBm4UgmiF.wؓf<Gv!] E8+wuJ+"nic*NvIqy/4 =nf˲% vohb4+Z+zq kL͟qPuH=_> @Kx+!znԃ%D ZiGքcW'ɖ qGRoaDu"Ҁy~POoH{z)(Vj8QJmUkv J@HZL1I"G_Ra\a/DjQʄvl@NcPBzgFaA@HvYaZ!6qH{I0D`|qԗ@2(yޛ_b;|bzɉpv:9%B^"B>¹)cӲ_ *ã ADZ{w[]S.ΞC!Or 5m:]\2(I % VWg's$ V(^rFp]]}mF~F1cufkhsh//yK-,[2cX>^Ǫ֫^sI~h^FxawhgW PARQBq-yk[1T=i%Q0oNVMqh< }зs(x?@fq?3j4tX>sTCW]cG9JKJ%'رCiÇdũ7QZdU(+"S0ZـFGr3`9]қŬ6w%ff֙'jT_ppiGߢ?|~f]0_9tum)^Y`e?ޜ`6_~:C3)&|cgZ]g^~v on- p<~g(jtQ OɳKCC=&()8(q/Pѫ [9zaG`9 ܔ;&w`~"Yb颚_ݼ l:dm;9^#{7ڀbYA#;#jww 7_"Կ|P0#%| !~sɋ X~T?ZilżC LVz@U(&+SÑG[hYi.vz6BDXyhOEey!&2;<(cbQ u}fn$ny,bqU '8qOyjΧΉd"HjTܬë?q0fz8_PŀJ`mهu ̥'b,:,O"G)pgA,ꯪb|ix5onCnWU7\")M/ h?:-}x~5%8w9MBu fFʕ=D̚7.}hq`kOg_mR)s=:,YfTEJS9QJfYfl+g(.R`' Z $:eAu祦RTDz\1z"2ryi.p.F_ӵIf)]uC ZiXF2b$&<*P)\xK7O HӺXNxÃaᬬ\(#$PΉ jGk+'7 ޶"L@ :j I=^r\2/NpΈ9~h]x=uv;q;hV7%\i8&8$ꨄzG%?yyXuĶG`oqVכC*l'BuݘpNp{ !;wt8qE2InǷW^S -yCK7T8#۟%]>'U+}Jz^ yoRʋ :mI\6Iƃ\?\#L3ec}?sEj}SBVb u۸++M_ x1}{'U<DܞK 9!YgV 5'fQ&f Mfam=P+y(ΙYP״8]rTXppvFBl] nԂDZN೹ӕLJ/èu,H*h>8SϩdH4òcƍƾd>aCfʳj\8>eO 0c/җ܆FJ9!J?&,gv) u1.|hlFi|XOic[1bM[;ǟ\s.Hi.p. 
>Xg Kr.NoH_G4<=N?sq8ߡY45k-^|u o/9(ZT" 1dRJlIT ~>@=PS+ ./Z26_5e)wb>-#_޽Wh6YLSs9 rv*WwRM,wt5a-s`X$ӛ|1y+oNbv;Ja62TUV_[]V7<)1<D'ŘűՌ%*Fq,dF*,?|>B-a>gv?;Ы|1@B/`†@׼3ߘҠźeߒ(پO5Q&OD92j}X(?ů˶#Q0hgkEaE$Xτ#$$B J2kSAkTOk~(+B-ʜEw?_J2VPu+g ~z3x X [eH|&I]@JBZCo! 8!XS>A;"/&ӓA$B"θ!M<< ]kH,eSd H(Od 0 C+דEP[_$׌RI 1JD̤\sJD'X8c2Mk!($S'YFr@ p)F !44LT 0N5<&!GPIac~a#Pta44H(ewaXqEk ᅥzą繸f~O5]oo~CLu _( ۇ\҂2IX|E<|<|>+^zjO>|w9 !tI7du/_>+V[9"^s?ƣٍw&+/cx6`Ed/_.-c|4: )I*C0Rx~!g_G|\/w10]/՜ F#"}ر# bb^ JrV,7&|w[|00U[I-;e5״WJm.]_y]NyMA!gXH} G) .fS&u鎉b;|: >b{ob'H+&ĉ Kq-Lґxy!0(=7"Eښ7W?_5}\TIRR# B7ݙe WϨ8ھߦqj"ftþti޾epW(+fwIQ֕ԉ]_poysmÍ7p;]TW3M_d]bSp˝ԗn0+'d[ѩ((zof01[L+~W9F`H t|&/g鍱[|àqlR#bɠXKqԎ8j "!c?.ب|umWr,Wj%ߣIf.1u@HZějTWȐ9<2ly|_HDPI=$0L)\.R*MB)&띵5ڏzkuT$ 9>(G_|4xwfm) .fýIkbޱOA/wF_o}=}2Ɠ8/ /{R[qToЅԽ^'.E퉷bxA羷P$[w Ahn+ ں) g,wB^!h&voSݳ8*0-ta>9{fIZp.(Cݷ^ nQkԪXh'ק`\o Shq5@O.XS5ݝ[n<ݪfv5& K] ?]GMt&48V.ȏZ5VXND@!N [C qwGlH2^lJwI v+Mrn#5o$:ՉqSvZ+=k9!g!0 ,TwWP-hA1V! $hoP^'N7bWZD먴շJ*)y}4tDi gYjg&IK$2˵L$b 2ѩJ2fQ(Od 0.wFu [ݹ.ւOӆغ81*4I KaL&! ZP!X]9,]S[2xo&_̚ꦗO5#A2 plp#XFrdVN!I!!#%fgcL 'yF1f1^cԨ2TE5o4xqKAEI}G̢T~TА\Et-FK* I|Guq%=uhuˠ!߹Szxby=ݵUߍ QwZ}KtR_O畿kvzw !y=;-R~Z 2u56f]Δ_k&w `,żD LDrT(" e I.z|_79?w3TPCMْCpD:){@Z7z;qd皻A],#n?R߄bDX"ReJFANAƊ+ (J( wwAk)?u?!p]#ܡdQn~@ѱq[ayuȐ1s9ː_oB+VJh'*F©I5Cߦv\y1>T>hᯬOݝlӽtr9tV+ȧeO^"ULmҭTOUW:77`]GWnG 9t;S\ow_/<>[O-û3 + B0C (n=2?4xPdlC^7 RVEHtoc0@e۹dA~!F? ǂMFbnFǷ=G@6fh 0Z3AS! Vqmڱ"F20cCp0Ak 8(USUa mnkl v! VK vI7` S\橃hf3i^N-H{1dĊ<5o4Ohj$ٝ);US s7{~RR- ?l$aߢtާ *6@"t Vަ;qhMb!X6|h$6D"EG)~wTl$uӉL ovNƠ%!C+QwV9{EC|Fdi8!POcRZ'Vߎ h Ŧ; UhS nM\P8"6dlqGVE@W E]ֵ:ô6' vnӘ]>=`O(( %BmOwn2rAA :j3sy 8 rbrБ C*{#t2{7NwZs¼p=9&U)@qظVǨ.xOHG]Qbo[jBPQ aqrob3ԼPR͐Ul Vk@RsDɲ DCB,@FUdF)Ӻ1p XI&_Qc!G2qdMƨي0( TShk}R tVxTHR:G[./&B$:Dm4o{ЉH"3ڣV.RQ׉myu9[2 1w7֯SF xcYZغyUJ>k9T)ō4ѻh1O.oo;h]VCG ] R &l&wQ)DyƬCn8j"fQ DYYP߬+)~k)ف5X)D#J(>'`tdP.ҧ?(B21YAC8 pφp!M/?!#˾{d"$ yw;ߪ"1Tv(P1t L! Qw@[&z~Rf&Ӄ,[2|DžQ#VDX]hFeU)Şwߑnʰq򒐫ьT/!1C/  $a--0@ sZS y#A {v(Z;rd,Ƙy'Z=h|u1MoFS]bg \J V}vEarEGg!pǯ\ k)H)ARTRqG9:X!/ =#]_t>ǂ ׎cP?qH,;@0t,zbp5s52(P5Ls9֧E?h5y"YSz/HCƈAdb :O{l$l=]A~TÛ)UԿ>TހQmuኡzb㏅ Yuo7J\TV⢲hZ~%!s&D!H ʰ J+I (fj5ooFfiQR_fT{G^-gUv;G+xj4)_~js$m4A^ YCBRQ#QNY4k #B儝.dTP adkÝaRJ !P@ Bj^\@42|1Cb33r!#da;#-ť-8StgIs I$Ha4y:p$" ]A07H"ZbR]T]X6H$eގr,a?3Q"0ӹʝ,mcS(h^W )8YV9/ 阢RD1QD^Џ3_7!U.1'XJO7psT%S&kHG!M^eӎ`\u`4\ ѷ>NKgC5lEIƐRB'+69LIKXX'ֽkePX U%HZna]ikPJ/UKiRlWzwQzݚ.3U-OīS֟ξ2WO?0[T`Ї?k<0xW_mrߥn3FMf ﯯ'ooaQ;Mo;jq)JXWF.Oݼ#)HbBٲ8O>qǠR 8gGkO507~^VV.?ZlW cf;d#hk  POTnRK14dY_MܢQsx=ho|^*/~CtE5 b,3Hl!D[ J,&\H ds,1P'q^ZS"L! ,͂pټHɽ 2ʔͽAi} KFx`;&cAoFp౗>7aے]L'[u#C-"79u-Waa *,?DLa1zM4{lP"9 W-#XLӱc0$C>"ovt@e{D dㅶ*2IB|QP]sl313kuzQ𭺮-:ܺ ba54r3G-!H01o{f# =GtӅnq܃H 8)"J:gO6sN%_Oz ҡ|q]~e9oƸR\ (H-vP3 )4'@Zqe`6s0˃ubⷢCV,|UbqW̰4}UH6vw_o!hEQiׄ D,/_n?O΂`W&95XZ]Lk;όȯ LMy6fE{#*^˖:ac 8d[ XNd0n [BtnxP޵6nc"˾]'_CѤEvö(6ęlGqlnA1CsHP!b$ =iKn,'E<ŀFmǍ="8/9+Du^7 ZdruAKA;q* p=x^\$!>}{s@eJ.-+ng\pa>`I Bv3dܚ؛ At%]RB. i)[- _0ba}2oq yI`<8€o5:nZÏU5ffTQޥ!G+zՀ\ I6Q @125ZA bݎ^Buj- gionHL0rZ%1q1 eE3?͠B4M"vk3",BbAvhbC"z%>}07I- r|N_1ς<F[tEڡTϋܱCEH| w `Dt~^jV^}9O[qLQf־14ֽVLXX"# I*6aciy ά{yPS+6"d aI˳|Dn8=;HDiHxƳIx hP]xRfa@ 757Ƃhc yMT>ug=^Gl qډۘzCcO~}n.EC<2>U}f .,F^+rkt$Sr/|WG^c3iƕM* :(,kﲈ3rA:xk~&JsjȪ}6j!93Ü>.?&85O9> -:2Z˫+!xG{{$(2a~H"v`fIG xތ&*ᾧ!Qk+(bi3,z(b;MihE^hۼ`,|\ Z|L0^\qTz]YPڕc\%"& (1:v5]3f E &6Ą '_iRblؐ$d쪦% 7][EMe3v0t)*^iO܋჉p[L(MlnLߏ %CZ2_Vb! 
0فOY)^3› P_1q~kN|L/^ 4 p`_CO{, =喷<5:-*;BHhTay0C&< W'{5߂Jh{035QRrtw+ 7].^HwVe O]J/M][mk*+=r82AX(_ N{dBO6+DUo!q:Y:!M ϴЄ*]7cW&k]_behzVhz ELrҲXز1 iժǘ ryksDJq1qSn=Yu=N1gi-D31l$+yOh %ᔜPǀ Z.X_ 8ͧV ZxX5J´+Zh/R\:>GjDe@QrLh16Q8Ąm_I:fgM,bSQknz#-'T4pJݬMyÐT |!WR@$GV Q&E)zC\e윢>xk<{ˋI4 j7G} ́z-6f|' iEP9=HcXRbXHȍ],ED}i|;E?ǫax؋xpڇT8'` kq3wrFhەc>AdsaLy 9 {i^ߛoyd?s ~d-|syr~ V$f LbȄF$θ'?~nd0 .ϱKpbJM)OXefTF\S6IL `EQ" r`PXhyU:LqC7lc ْ^;Kz,鵳EK OR%\XrE2@eI0u)B` %ƈdFpXj~qyp:agY:(!bE??J#? 3{[x}4Mk?||>>gg9~>Vb2S$2cB$4@0Rky ˀxfY$J˘`¹}*( KƑ:dbHTŔ$eeԘDR d8 eq<뚡z8҂x azkWҡsܼ4߹%5ѻ{3-| ?Xq7woNgg벓o>^PT rŎo -mcdc{OH0.%2qP,*g&B kA1Nb8bIuರv8& ,ULC4X^C2&KqqI"iz @*!Jkб$1ŠgV`ES3`#4N7HU/,YZ/p8cو1 K`eĆ-$&4NȔHRB%ladhb.k~,8(!eI+8 ,8H*eS9lHXcf`F4 ;q}^s,&2|LHBI!E 냣% B *bR"I2:'D%`;!HX(*J RNk\GIՑhcA112sLbFmf Ia2ŜY2ܰ8cM-$1eLRm%yL%$c,#8`c8NPlEy 1zTmǻ"ߟ6h[OL= !z\]~~+K?²v{ADL g3xkশ(!ž?^hEKpJiثa$Z1'aDqWЩ?jhz~0xأI5| X۪iӦ/R.MʂV_FihQ!a G3ƒ9ӳq_h/c15ލV|o[h|@9,72TV _J(\WzsSZ)^O2-@ƒfeEONܭ[%%Tϭ"P찒%DhuF h r$o .CĔߕ-z_?6g/64,&KN-gK\lGFonMgo?7|=M67zl1 $:IAe{0wX"V*H`n ؖ+CbK\Rnرv<ѻŽU;j϶\ L6wE۽ev{C ?vڥ P%Q`TV霰{LVV]VVͭ':k^ƽjqxy\|c[Fs!q{eaᔘlچV;mzϒH13!ۃHbѣ{jڀEʎ{srC{:݂]H¥]}4EyraSAPF@h$s2I!|"9~6+vs|N;Eg,[:t<ۇW<,]N2N\:uhF]إy`O~28xAՏr'[]. T>,_3#dC֘4x'ʺ1FH֭.>C3Xά[凧ukb |SY@e~~M*W%} mտ9 &/zE쾵WZ \uN@~ZX7/j:+RtMKf29i囟L7w[;[ D0 ˛jQve6ϗ|P6LG D*DqE0B'.i:L%Tn$a|c>~}r>b![||N 4L C&Ī1c&YH[1t=|ѥrMQr;>'GzQgɡ)ԗj,xeZM׌٪}<1O{-x1A(;n*I xB| ua{R.(a.W֋ (L8V_Eѫf庿wc`5'Vw z^ZCkVlظ8tdDב{țnv9~$e\㕂X]DeW nY@yv;["3igw0{8TXUEOwGMkJHe^ V+E-A?nOhqE߸[ D50Oۂmּĸ[ɀ /Ơio2-M>/ 5<'Fh Ko4j'Kr[p[1xSK"lk<T&C5R1j ázaNt́2aZP!];{4cdT-&a\U" U$` I@W5=S/5m|Gn\TaB> ZuDC~3tJBR3^m]/ |冄-xvzzcaXU?\}0g==Ix]Iu;/-&5Xc,*oW+!>0E"ytßWNEAw8}I ݰ W&y&絛nr^'7@42PfpdF$f,DؘTbސI2EY8GGWN0{!}y`vTVt3oOv]p~ DtaUE>{Eo;w}Cg ɰXb)GhĸT2M(јf,Ib,>*HySc*-GBX*SJleeZ",!mgj۶_˽x?tbӹI&iz Ԗ<ӜNHYI d6)<XJHGv.:]pDJxw,zZtoB}=MO[XY?yk 7o2W >ugU_M]3뫱jMۧ{m)G\jb1{4M-%K$gO  5ZK(Nfb2Ztv =5c3ƣ|O*_}jFcj%!>y!$^l#B5[/ S*'!.ؙ'/Tf98PTX V:)CbU+r+D @} (N{m[ 8~)1ZO }Z۵!E`3FN[(݋\D` -v@#`~ҏF9Tx M X6b9uu(w}8$I *{s9d4eo*g1,5`ԇ3twڔ`FY6'8E`K;azObIL"TX7)o+1Y0!7Mϕ!"H73|ل IHD)6 e0,f$ƌCXiȫK"1)A81D!X% (<ĂDNa q&Tk \ Y L!&1 0dQJ $$@Pa`(L# @xV{k~ޔ O~SKLVi?2Z=>_Vxgh/+$2SFo^D"\NtY/6Q[M').5`1qb.d"F<RRD&04PQB  ш(&;#б%B3+re毈U + l9̳8[W6;Z%idw#7lmqVIGh)bޠ ݤ,rYṓp2Lj9Gnj^KJo5 ]/~Uf κ1x䬵*1Fr(.F?fVF/Fo>Mgo7(I ޔF+dڥihk`~{f]Ayu;icÙUM?X։+v?~\L-0@W V~DP~rfZYsSxoCVhאK+Zy6DY'כuW,ۦ9v*$i+e=䶲higki 'ZWJ/jNz^:?7OT;~N+@&ZcXob>qzd2tAg[8J AirZX t-` W˼ [[yBmoX>oy4|-Kr%{Cؒb޳Js6MwHBwCW 61r6;~~CYZ#(7KMCdrۭSHݠ(23Zcy놏 ,CXPfLATْ e;-+5 Fvܼ{o;z*:+nisDG.pvr;b980^ؽJݥvMp8;wxB{[ٍ. ƀA7Z?ݘRpl֜"K3ݬ&3Jcis)8A "3;H8 4\@Q@IBDeHRhhmPBHҢC0@s\FS+5V9?4)pީ!iPAN[t;7 ?V \_{-xhnn[s)߯{vK7;iKkYx}5#Eqk@Pefi?ֹFY2<'sTxeL]?kt韣}uӭnv꧛lr1µF+\-_inԠ9Ź3gw1 0uvn[,Tݕg$Mԕqw2I6>U,G^CUIduqmuP@ԚusjiݚАN3l2M])R n8Fw8ň˶wq-{R!v٭>S&C*2ݎ\Z}zS[c3G Vf_?a&:by'ZTI\~2??_߸37;s}.}c $QD@2 b N,$0 '$CTp/#4 }55:;eG ]gE k>b=ixL O#8spi}$= lq;Fzۥ0[NW; G 8!CwsJDW.z*=-K$*ر焯5:%M(M75F6jQQi" V@1(?|oc7 TjnDwl5NfM=&Z~1߷?Unեbӥnx|H̅L!CJ&* P$SAD@pPF(#H9]0\P.g7[ dҳok.~37Se81˨+w8vQQHCM03LBD$ @G*!UqD,E$V DvLwaEmzPr5,>p9aWmrkUdh@B<\`=,@A+ #(!C8'<\'E朝 ۊ.8 [ DHH,  c2TH"AØ$FFB&BvG^D9?G^l=׏|_~f/sµF+\-_iw9%x˩ wfc~*fgW sN6ʹ'ٜ}RLѐN:`goϝth\1Q6X<ͩhѺ9 jkݚАu Q8JkРc#hZlog͉BH!u7.D ̿z&7Zi7;q@۬7 BhkSw!u$xLAV]~r1QA½2w{>_|)ǖT~|Nq\vD7,^20Kna~0 н4j^U6{I߄;G8L&Ʉ(LB  dغX.|6Tʢ~\QY&y䌞mjQvVChdˮGcaC"u솉E9OV+x|JYrIJ}|JQ#܊2Mi~ŊZsIkѐzeNy]"XTbEKYy̓$(z|: fJ4:\6v٩u|GJD1:pWza 4W"m zbbp'"גs( % zW[ >\f Q|2>A_aU!Ρ~,[![虯z`NJubɅYz-?{?i2tQà;r A=j*@/eI4Ol-DIsեFd2_8!D |KȎ! 
CX6=q0 6`'c0CQ%M30 O M#&<|`8;;9gTx|8/@$FT셠L N( Lu5F'md#$BHqP\"4CP4֯D`*!I" odW‘Jh6\I`ҫ T"Nb)9QdW1#cL Pp8ר'%Lǔ'(!իi&"A$TK@%P+ @"8Ҫc D1Ƨǻ`f׻"X&֛dՆۣ)j#?7ui9-y~<>__j'Ntã~EkVh:?bZ~:t=O$;*}\^ה|ٻLo:o8G޵5qcnj%/SI6;Isy\nь,z(*3IS͛4. y$j6pspKO&źm/;%@BAf&l(a@kjQQDSRs{#&T+d^K4=9ԼiVAvEV $ e?ȟZJa$9C2 y PiH1Znؐk!뒁l51Vg\!_?z6 Jlպ7zSn_{} A { C˛,Eh_HRs_b$` n% s $AO%" !6 ͝xqpF'% ߊ@ԄND'×d>) lFiX[uBVs9AN,L_ѱq@'"O\BٚVݺu]'E̺$2^6N4` -R:0p/ &s\+-XGG~&rS*<:0-o/ϼu;j,H#Y!Fi6:4i_sR ||Iɠ-81OAbى1>+QW'W%ߗTk<,UUX&QRnXbY801<@Sёe/OLo{S B&=\Yu٘p`U#?')}7NJ t s{VcM$[c`Z% 3Elڋ2o97gs^b1_HZ@/fWa3׾w WYj'G-༭|wVq)zػs 3ܑbdB,I5 I@v#4DJݣz:mrjqZ5ؓ_ЬcR4(v `d8 P!0--}+ 9h)cTшYKyA@21LK\)T#tR{c樵t.񳦔 eNGTB;XƊ;pA>o8M'a=ٲ.[aXS DXCZbVA({aJ+Ć[@kwh@)MLBH|p#+}26JV}㕫H3I!G{˜O^^ l~1cRo=}ۊz!|b!0M;P>lQ%"ť=Z\Fl!ʧxF{zGS)aނ׽dm!ȳrK$w'T h8 zHJ8^iQ%2}|e=v>uԪZA;E'[TV 3wՎ:pkJt.8 qm4%q埵Zrw< .͵R/C~5[sWd5~q9nȗnܭ?>Af|uU>'X*uK7(La=ɜ?sg5Sْ=Խ:=_qA'Y:|Ƿ¼q+74A`$pʸw_;ɳsn7 >K ) > τғ{|8H;cE҅lL^'Șʳjnwݺ9 MzG~Vι6= I8gse%h‹I4PY 'iT^vWQV(S'+ ,bl@H" +ou耙Py8ӧΫC2Y-$DrxpijI#DvˈӶۗ7?ŧ5i9D`\,;tqSܢ t/S.z*Hi|FGzO臑4˯snbɞ^${V),E ڣ) U+xM9L^aZQцExN͍*%X͍] cT 90 yxU\Kb߹c劂sRGF;U)Ai{^BY]K \Q5(sn5*\x?\ܯt;pp0n\Q./vUJjCʭ\J! A>AKJ>/D)bM9AWG$[Fø%HeL.ǡA!YoRV Fj9eu\Ң9aB9#:l[*Y(SRVk 2w+(e0B9]%yr8ӄ9CA\¨_J'}܊AS6 H#b*Ćt] YD/ X_R@2uIђ[ C3;&b0ȱRfzni9=p "0(sXiC;0ʵ'jbF PקX+y+!Q@dGDE;Ngs[:)HP^49+8g $'p˖ hIzKN[_"~ x'(ߙ'ۗJ ix b˚TNS"qLgV hxf>$ץWḂW—yWâ_= c@ _WYA']_߷fNTӆGմIk`QĄ2<ʶ?",I7a ->TT+d^K4=9Լl\!IԭFI\Z꫃Y-vveGvɾ^,frq:zZFٝbs `O?v.s ~r}{ vA/_}?/$-O~ٛ-2^_ߖVkFOd{nX kYs+FBE4J^'j7!i -щ#DMJTNERH)AiWE,vɌaW4dgsU$#`W(A#'鮿JW旻NbSެ_mq[{x?/ѵypq/ EbPME g6(h#05=@zzH[1[JJ{zpbsM~tsJTB/K69nq^_h/] :jF)Éd5ܩ]%6Y @ J^|۩D]K\G-/e i276x,ǛiZ (9f\o1jjMyA XQ}/`ѯ[Li֛Rem7uN?vR3vPǏYYژ 3f/*[Ku쁢.ˮn%9GzjW-% rX$-\*st tMz `ZW!'(~eNZY!`չs@  ƈS%R)k5(y<$M$, ڇw6(1M !<:Z =۰L6sj fONY?]ǧQ>Fb 54(Ǻ |HHrI.nKE84 iiq&@LN;1vD/uz[%}A)/8wkJBrqT 'z׉_ġLsuz$Џ;YU.[Gke ƱV ,?T2듼Vn&yόr&#&l~蟧_OS[na+VP\~Y^MZXB/f/qǤ6YN!0G?Ћ'ƨy9+-5L,mJ(hR&]±JU)BVbe$=GS,SGJa#gdr`;-&HEBR/ ^I8lRHGV!(p .? f Y!HPk5+L$/_WVptb/S+*+ 犟#jM}< H%JBpDя!*CJ>ҏtc 9Np[,R$ C 2GQG B1y&"td"^ӹ ߒnÙ&O = $G⏘?Bن!$ )38 -;>;wtqF R i͍-Ǻܒ.7ɡhOc-].lQ}z&0ɉ?Ie^@Ǹ*C Q֐b Pf"TD*$D$ód̑Sm# 6\ MŘXbZ0y/fͽUמ W(Ihz+Hz%{CYڮk! 
5mkLY~i1p߄:x}3߂w޵6r#"Kb!.HN\l ĖIKOQm˚K hꫪnhs7,"YY͜ΖQg 2d!'zS YeD <梄 Sx7 bFEHl#PF:Z}q >e΋A K3U.Yn6 C6 NXJ"E{O)466HDQŅLK Es?k%rTaM}w{lmda–&,w LX8驦nUh&&9qg[@0f{i/&>RI0KzJy'iZU{~:^p]o$ϗoQr:_qCk}Nㄱy+p)]{- fVYPmUR[e2 Fj5z$`Fctz|3V nx +mol ,Vw-2Va!/D;TcS٭+mLolngD: ږn7lʴ)C0,β_+k>[3E|}A/];; % 4cPa | 5" '/6JH!L1ѹlYkgGlR#EL~F[y^F,$E^y] 3a`DbY+XE ;%Dn# 1~ OsH'\Ió*Ɩ!y;UQzVtTe1RnD:߀DiJW Ţ>}]rU yS(L-lJ`yq~}!I$GO6DY`W,ߍ|AVo.%Iaat6AwHx;gրvwm_&\=IɘDaB*&j椏*bv15O׌|>rՆMC x[mJ E β^+yvI`rGTǂ"%*)U$1x@YKyC-אˌTk+(q(勅Hg5L) ŋf=_S"ysC\kEW$Z *ˁG.Is3O:Dї"`!K> }8Zj «5 Ѩ 5ȟS:S^<=8yaD0IjV[A)OxD!N1QI.uYc4]־7Q0g3g9I**(q % v p C=Mg ,!20HiBj;L1s $1/(bg~*UQz>+\qc).&e)`:F@ag>"8kE$]?~AT_g CNA9_< 2;Z־Z5YSbg,0.'eY mxB2#41`dګ-n(F c|aZP++ Wnu=L%;{w vܺmx?1o,d,㷣Q> o%>1̬>eeK|wW3ed&+M–,V)AjϥZ+CB\)]*|<.3Y׺SreDWrI N%ɝB/]kJ~/TLbB5O`ߚ `g~[2_LƛI Cc0-ΌldaőcZdAx=yzoɿ7e^GŌ4iey逘.򐋗o}#v%EMe :ȎU|[ JpRm9&J3(Zq[퓣x1p@0pC1z"Rj i[[ 9weOu_&Ϧ(@(A*&OSyY3ų~c]yC?>*u(wtc*Bd`\P T bпg,Ou)+,heP-&{R1^iC!dњTY&׆jڳxlȟUjÄ$Jv )rhA[zw0`O m`lILF}V8Z(QFRK;$m %絽vwIeJV{>I#p']Υw:m-cI<i> bV uYtۓL  $iEtL#84XNf.kNc Qݍ߉J ,70.'@CL<&J2 4вl.yTLZi:Y8MK7gLK@I?tHDW^>҆6QGs&]jTv_,@`Yd,%3 1%D2rʩ-ʖQ<llQRʱ{·0zj8bFѕugKMFgJUV2<;gUybظi2spI 2IxzJ3OO Z O <=Jnʴt͞,ſw颖B~j^hez2qG:C[xKϸ4ȝ=QbsL:oplffܞّR%GJ (Z^Ke!fsyإ/aq H.NjDm*GeQ9*KsM̴ʂ<5/-m:rԕ=Lq)d*W]̐te SJG}$=Vi?L8K=a`cV3;sή2"SToCYom6Krz͐4YN_~g&V_~9qe7oSq W _MqG5?;PHoPO4.ZpZhCjd0ͤj}TljvOE>DhldΛR>rJ,]tvRyQ,V+BR(cr=[*3zUSm]pdM(a2`PtJ Dz$)@Gol 5Z7Эg;!$8(9=[͓w|O0HV)Vj9e!'2ZǛo XtSn;wY'Z# (\f#]I8/̻8\a'@a= Aŀ*%923YY\3R aY4QnpG|8Ai&:0=-x~UXӕw16؝ eO*@DDOx/ Bgȭ_Ђ@=>|!,7.qfy)dJ.DSDFͪ^ AX5lB C%3! 9Oj.Er R]-Ky lad %?H_9/\mm=l9ttqn *h19JŋF `ʼh1B#C3b{x5EH8S.I^ُ( *2O2lUx9xx'ZZxKm+r |>hɮx>4"],'eK.4 C9t.twy K,eC(JG;B_~nh<#"E>xU;Ft&Zro<V0[QG A1J'AA`m*.*p@u-Rƭ̇% +=#mr w%ǛFjZVMª!ZU5/\wpu[;]m?BrQd!W2{%[i2;W{=<F4 L`B3c@iw Bۑ+ VgE76Bk\XJqe ɔ+@mY@R+E F2gZ!0(IS-HBv 2(Tx)0Y듔\f:*s$ J$ςO5 &_lQTIuqD.AEDbH8I?%EJ=z\JoR @*PHdFW !-=w 7љ$ - čTߞl>v7eo/ޑXjLeJ0`>]V zMz6;Dh9[\4}̦iC=& V»fN֝ynWjf6(B9M_J^>5*g~\n5p׺AZHWfC'X܏; QCVXnQG $"Cr*7Bp_Nqِ^9,ܶQ V܍Wwa$[ھMw?w䡶{ޔjS-Q=T -BU倁q?㯏OL=;,=DZ?ըDR_-SJ |1bE~Pֶ)dGa] WGP1BWf+s x@$R\,L6o[77?YOme`Aw)/)YZ>hW%{WbuB[O׊IqMLV4ﭦ1Z{֪TwiYz# y*JL}ǻQ}./i[h [UNq.{qnnnmyDuھcYn폽7n]h3Wozʑ_3sdx2nLO5/ ߢ$ґsёUESU_X,Vt0ʩ2>_9uY{gٯsp5pMZFջ]ի݇Jpz0)azM + 禲H^: Yp feK4D(S%Љ"yAĶG[E>ψl8|=Z$eG aP&38Q:_}aH.dQOYn[Vɻ+lJvUa<,\?$c6Z= gѱlaD@J™TMĴ˛l'[8Z ډE 2w@ݯl so߿Z7܆}ZH6k{]!9ԂDמNѤybb6 8gM2JÙW8z@#8XxzPI4*D^*G)ъhhGQYri78 =ηt;bgX$1/4~d&41mCk;ҝjremӜŔPM̗@%" GiԘ#q­ UHv3G.Mmnwue/pƝߠ碗t Y~_gp~귘{>,@<5~Ǣqt>->j9d xl3ÕU&hNF+bkDl hXPE`·9) /xO:VFqAO&PΈ$&I+p(X t1:Abk5#L!>&OZJX&q鷐Tr%-Nz]ݩv<əKPY04$;yP29%Cű`:&dBJ),T%<@K4HqCW:3!o6mhHYz.۞ y!o'dMdgaQ;l=|Β"{k??,ڞ#E`%?.-PsxpԒ`1%~7ENpEOQA_&fj4ze:G5\녙n)w$V1#rȑcq \תPsf׻/~ÍBp{'s4u/ ;[4l^Y~9[L_僟.tof!w%?i"nI2=Kc"g%z_@:=Բ$%W}sm[P:hchJ-[>c0F'D 2L`:e0GHS΅HuLg*0;vg-4'F 6ئPKgԮRڍ oo?~y0ҋ JeN?{Y w{PM'^P;d^ě=+L>O˳ۛE7q)ovt괼vz?$jݐH:H^EC҅XaYIEJw6!QnZ=e4p]3ћnS˟ /8=8 b,|B HoOPbƼpKۙM|?Ww׈5 FE uK,$YnRǿyКH ӡz, b_@/׎"p2qS¹yNlXK$D/%ߓO֩;*_xn.6{ւ6(j0PG4(%V 0m[+sS:L E6W|YqSo ʉ ɓN&J';-PAA؂ZDl]W O'S@9 (,:-x4J.YAs#RIʃؐ!uҼY<򔊝ӕ d5zNd',xzGapקxiBOq?nrU=g&?ڒ 2(&~?:Ff"8;\ScS$px UJxgeƚ'\Q!8.J&_s]R3to(͎/e~W=CKǷ@ُK"1Eo s7T~?qg+-͋TEsQ'nl[O }i S&bUIE$ (G M yTP`;Rضk'{ PbJszr6@^#)E( z8:B3xfۙe_Fɖ-ޫ~E}y md[F];Մ{gQ- S|&Hz٪zۨP)zwXNw2-ek2-]PmН8iRA˴TlK]T_S 8i#RE:``9bub$J.:d;4$,T[? 
var/home/core/zuul-output/logs/kubelet.log
Jan 28 12:20:47 crc systemd[1]: Starting Kubernetes Kubelet...
Jan 28 12:20:47 crc restorecon[4684]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by
admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:20:47 crc restorecon[4684]: 
/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 12:20:47 crc restorecon[4684]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c168,c522 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 12:20:47 crc restorecon[4684]: 
/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 
28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:20:47 crc restorecon[4684]: 
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:47 crc restorecon[4684]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:47 crc restorecon[4684]:
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc 
restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc 
restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:20:47 
crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 
12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 12:20:47 crc restorecon[4684]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:20:47 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:20:47 crc restorecon[4684]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 
12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 12:20:48 crc restorecon[4684]: 
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to
system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc 
restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:20:48 crc restorecon[4684]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 12:20:48 crc restorecon[4684]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Jan 28 12:20:50 crc kubenswrapper[4685]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 28 12:20:50 crc kubenswrapper[4685]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Jan 28 12:20:50 crc kubenswrapper[4685]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 28 12:20:50 crc kubenswrapper[4685]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 28 12:20:50 crc kubenswrapper[4685]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Jan 28 12:20:50 crc kubenswrapper[4685]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.206123 4685 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211783 4685 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211815 4685 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211826 4685 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211835 4685 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211844 4685 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211853 4685 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211862 4685 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211871 4685 feature_gate.go:330] unrecognized feature gate: Example
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211881 4685 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211892 4685 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211902 4685 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211910 4685 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211919 4685 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211928 4685 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211936 4685 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211946 4685 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211954 4685 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211963 4685 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211971 4685 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211980 4685 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211987 4685 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.211995 4685 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212004 4685 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212036 4685 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212063 4685 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212072 4685 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212083 4685 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212092 4685 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212103 4685 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212113 4685 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212123 4685 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212133 4685 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212144 4685 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212155 4685 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212165 4685 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212209 4685 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212221 4685 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212232 4685 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212243 4685 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212255 4685 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212265 4685 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212275 4685 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212284 4685 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212298 4685 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212311 4685 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212322 4685 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212332 4685 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212341 4685 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212350 4685 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212359 4685 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212370 4685 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212383 4685 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212398 4685 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212409 4685 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212421 4685 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212432 4685 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212442 4685 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212452 4685 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212461 4685 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212471 4685 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212479 4685 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212492 4685 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212503 4685 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212514 4685 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212524 4685 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212534 4685 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212544 4685 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212557 4685 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212570 4685 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212598 4685 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.212607 4685 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212760 4685 flags.go:64] FLAG: --address="0.0.0.0"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212776 4685 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212793 4685 flags.go:64] FLAG: --anonymous-auth="true"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212805 4685 flags.go:64] FLAG: --application-metrics-count-limit="100"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212818 4685 flags.go:64] FLAG: --authentication-token-webhook="false"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212828 4685 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212840 4685 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212852 4685 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212863 4685 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212873 4685 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212884 4685 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212894 4685 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212903 4685 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212912 4685 flags.go:64] FLAG: --cgroup-root=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212922 4685 flags.go:64] FLAG: --cgroups-per-qos="true"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212931 4685 flags.go:64] FLAG: --client-ca-file=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212940 4685 flags.go:64] FLAG: --cloud-config=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212949 4685 flags.go:64] FLAG: --cloud-provider=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212958 4685 flags.go:64] FLAG: --cluster-dns="[]"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212970 4685 flags.go:64] FLAG: --cluster-domain=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212979 4685 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212988 4685 flags.go:64] FLAG: --config-dir=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.212997 4685 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213007 4685 flags.go:64] FLAG: --container-log-max-files="5"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213018 4685 flags.go:64] FLAG: --container-log-max-size="10Mi"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213027 4685 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213037 4685 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213048 4685 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213058 4685 flags.go:64] FLAG: --contention-profiling="false"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213069 4685 flags.go:64] FLAG: --cpu-cfs-quota="true"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213079 4685 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213089 4685 flags.go:64] FLAG: --cpu-manager-policy="none"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213098 4685 flags.go:64] FLAG: --cpu-manager-policy-options=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213118 4685 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213127 4685 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213136 4685 flags.go:64] FLAG: --enable-debugging-handlers="true"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213145 4685 flags.go:64] FLAG: --enable-load-reader="false"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213154 4685 flags.go:64] FLAG: --enable-server="true"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213163 4685 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213206 4685 flags.go:64] FLAG: --event-burst="100"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213216 4685 flags.go:64] FLAG: --event-qps="50"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213225 4685 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213234 4685 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213245 4685 flags.go:64] FLAG: --eviction-hard=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213256 4685 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213266 4685 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213275 4685 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213285 4685 flags.go:64] FLAG: --eviction-soft=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213294 4685 flags.go:64] FLAG: --eviction-soft-grace-period=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213303 4685 flags.go:64] FLAG: --exit-on-lock-contention="false"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213312 4685 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213322 4685 flags.go:64] FLAG: --experimental-mounter-path=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213331 4685 flags.go:64] FLAG: --fail-cgroupv1="false"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213340 4685 flags.go:64] FLAG: --fail-swap-on="true"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213349 4685 flags.go:64] FLAG: --feature-gates=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213361 4685 flags.go:64] FLAG: --file-check-frequency="20s"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213371 4685 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213380 4685 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213389 4685 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213399 4685 flags.go:64] FLAG: --healthz-port="10248"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213408 4685 flags.go:64] FLAG: --help="false"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213417 4685 flags.go:64] FLAG: --hostname-override=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213427 4685 flags.go:64] FLAG: --housekeeping-interval="10s"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213436 4685 flags.go:64] FLAG: --http-check-frequency="20s"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213445 4685 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213454 4685 flags.go:64] FLAG: --image-credential-provider-config=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213464 4685 flags.go:64] FLAG: --image-gc-high-threshold="85"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213475 4685 flags.go:64] FLAG: --image-gc-low-threshold="80"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213484 4685 flags.go:64] FLAG: --image-service-endpoint=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213494 4685 flags.go:64] FLAG: --kernel-memcg-notification="false"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213504 4685 flags.go:64] FLAG: --kube-api-burst="100"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213514 4685 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213523 4685 flags.go:64] FLAG: --kube-api-qps="50"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213532 4685 flags.go:64] FLAG: --kube-reserved=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213541 4685 flags.go:64] FLAG: --kube-reserved-cgroup=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213550 4685 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213560 4685 flags.go:64] FLAG: --kubelet-cgroups=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213569 4685 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213579 4685 flags.go:64] FLAG: --lock-file=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213589 4685 flags.go:64] FLAG: --log-cadvisor-usage="false"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213598 4685 flags.go:64] FLAG: --log-flush-frequency="5s"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213608 4685 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213622 4685 flags.go:64] FLAG: --log-json-split-stream="false"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213632 4685 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213641 4685 flags.go:64] FLAG: --log-text-split-stream="false"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213651 4685 flags.go:64] FLAG: --logging-format="text"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213660 4685 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213670 4685 flags.go:64] FLAG: --make-iptables-util-chains="true"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213679 4685 flags.go:64] FLAG: --manifest-url=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213688 4685 flags.go:64] FLAG: --manifest-url-header=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213700 4685 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213709 4685 flags.go:64] FLAG: --max-open-files="1000000"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213720 4685 flags.go:64] FLAG: --max-pods="110"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213730 4685 flags.go:64] FLAG: --maximum-dead-containers="-1"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213740 4685 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213750 4685 flags.go:64] FLAG: --memory-manager-policy="None"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213759 4685 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213768 4685 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213778 4685 flags.go:64] FLAG: --node-ip="192.168.126.11"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213787 4685 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213808 4685 flags.go:64] FLAG: --node-status-max-images="50"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213817 4685 flags.go:64] FLAG: --node-status-update-frequency="10s"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213826 4685 flags.go:64] FLAG: --oom-score-adj="-999"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213836 4685 flags.go:64] FLAG: --pod-cidr=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213844 4685 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213857 4685 flags.go:64] FLAG: --pod-manifest-path=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213866 4685 flags.go:64] FLAG: --pod-max-pids="-1"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213876 4685 flags.go:64] FLAG: --pods-per-core="0"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213886 4685 flags.go:64] FLAG: --port="10250"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213897 4685 flags.go:64] FLAG: --protect-kernel-defaults="false"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213907 4685 flags.go:64] FLAG: --provider-id=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213917 4685 flags.go:64] FLAG: --qos-reserved=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213926 4685 flags.go:64] FLAG: --read-only-port="10255"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213936 4685 flags.go:64] FLAG: --register-node="true"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213945 4685 flags.go:64] FLAG: --register-schedulable="true"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213955 4685 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213970 4685 flags.go:64] FLAG: --registry-burst="10"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213979 4685 flags.go:64] FLAG: --registry-qps="5"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213988 4685 flags.go:64] FLAG: --reserved-cpus=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.213997 4685 flags.go:64] FLAG: --reserved-memory=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214009 4685 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214018 4685 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214027 4685 flags.go:64] FLAG: --rotate-certificates="false"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214036 4685 flags.go:64] FLAG: --rotate-server-certificates="false"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214046 4685 flags.go:64] FLAG: --runonce="false"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214055 4685 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214064 4685 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214074 4685 flags.go:64] FLAG: --seccomp-default="false"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214084 4685 flags.go:64] FLAG: --serialize-image-pulls="true"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214094 4685 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214104 4685 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214115 4685 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214125 4685 flags.go:64] FLAG: --storage-driver-password="root"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214134 4685 flags.go:64] FLAG: --storage-driver-secure="false"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214143 4685 flags.go:64] FLAG: --storage-driver-table="stats"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214152 4685 flags.go:64] FLAG: --storage-driver-user="root"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214161 4685 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214197 4685 flags.go:64] FLAG: --sync-frequency="1m0s"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214208 4685 flags.go:64] FLAG: --system-cgroups=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214217 4685 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214234 4685 flags.go:64] FLAG: --system-reserved-cgroup=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214243 4685 flags.go:64] FLAG: --tls-cert-file=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214253 4685 flags.go:64] FLAG: --tls-cipher-suites="[]"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214264 4685 flags.go:64] FLAG: --tls-min-version=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214273 4685 flags.go:64] FLAG: --tls-private-key-file=""
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128
12:20:50.214282 4685 flags.go:64] FLAG: --topology-manager-policy="none" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214291 4685 flags.go:64] FLAG: --topology-manager-policy-options="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214301 4685 flags.go:64] FLAG: --topology-manager-scope="container" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214310 4685 flags.go:64] FLAG: --v="2" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214322 4685 flags.go:64] FLAG: --version="false" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214335 4685 flags.go:64] FLAG: --vmodule="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214347 4685 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.214357 4685 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214601 4685 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214614 4685 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214625 4685 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214634 4685 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214643 4685 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214652 4685 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214661 4685 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214669 4685 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214677 4685 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214685 4685 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214693 4685 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214701 4685 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214709 4685 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214717 4685 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214725 4685 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214732 4685 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214741 4685 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214749 4685 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214757 4685 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 28 12:20:50 crc 
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214765 4685 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214773 4685 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214781 4685 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214789 4685 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214797 4685 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214805 4685 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214813 4685 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214822 4685 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214831 4685 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214840 4685 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214848 4685 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214859 4685 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214869 4685 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214878 4685 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214886 4685 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214896 4685 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214904 4685 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214911 4685 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214920 4685 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214929 4685 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214937 4685 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214945 4685 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214953 4685 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214964 4685 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214973 4685 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214982 4685 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.214992 4685 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215002 4685 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215010 4685 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215019 4685 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215027 4685 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215035 4685 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215043 4685 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215051 4685 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215060 4685 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215068 4685 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215076 4685 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215084 4685 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215092 4685 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215100 4685 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215109 4685 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215117 4685 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215125 4685 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215133 4685 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215140 4685 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215148 4685 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215157 4685 feature_gate.go:330] unrecognized feature gate: Example
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215165 4685 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215198 4685 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215207 4685 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215215 4685 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.215229 4685 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.215253 4685 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.233904 4685 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.233979 4685 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
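
The feature-gate summary above is Go's fmt rendering of a map[string]bool keyed by gate name. As a quick aside, a minimal Go sketch that recovers those pairs from such a line; the regular expression and the parseFeatureGates helper are illustrative, not kubelet code (the real kubelet wires gates through k8s.io/component-base rather than re-parsing its own log output):

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// parseFeatureGates pulls "Name:bool" pairs out of a logged
// "feature gates: {map[...]}" line. Illustrative only.
func parseFeatureGates(line string) map[string]bool {
	gates := map[string]bool{}
	re := regexp.MustCompile(`(\w+):(true|false)`)
	for _, m := range re.FindAllStringSubmatch(line, -1) {
		v, _ := strconv.ParseBool(m[2])
		gates[m[1]] = v
	}
	return gates
}

func main() {
	line := "feature gates: {map[CloudDualStackNodeIPs:true KMSv1:true NodeSwap:false]}"
	fmt.Println(parseFeatureGates(line))
}
```
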
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.236355 4685 server.go:940] "Client rotation is on, will bootstrap in background"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.243636 4685 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.243778 4685 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.245465 4685 server.go:997] "Starting client certificate rotation"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.245515 4685 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.245789 4685 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-22 13:51:48.239434225 +0000 UTC
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.245911 4685 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.303064 4685 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 28 12:20:50 crc kubenswrapper[4685]: E0128 12:20:50.307318 4685 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.307418 4685 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.334433 4685 log.go:25] "Validated CRI v1 runtime API"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.383087 4685 log.go:25] "Validated CRI v1 image API"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.386444 4685 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.394864 4685 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-01-28-12-16-41-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.394940 4685 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:41 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:42 fsType:tmpfs blockSize:0}]
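
A note on the rotation deadline logged above: it falls at a jittered point inside the certificate's validity window rather than at expiry, and because the logged deadline (2025-12-22) already lies in the past at log time, the very next entry is "Rotating certificates". A minimal Go sketch of the jittered-deadline idea, assuming a 70-90% window; the fraction and the notBefore date below are assumptions for illustration, not taken from this log:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a random point late in the certificate's
// validity, mirroring the jittered deadline the kubelet logs.
// The [0.7, 0.9) window is assumed here for illustration.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jitter := 0.7 + 0.2*rand.Float64()
	return notBefore.Add(time.Duration(float64(total) * jitter))
}

func main() {
	// Hypothetical issue date; only notAfter matches the log above.
	notBefore := time.Date(2026, 1, 25, 5, 52, 8, 0, time.UTC)
	notAfter := time.Date(2026, 2, 24, 5, 52, 8, 0, time.UTC)
	fmt.Println("rotate at:", rotationDeadline(notBefore, notAfter))
}
```
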
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.417428 4685 manager.go:217] Machine: {Timestamp:2026-01-28 12:20:50.415112544 +0000 UTC m=+1.502526409 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:b11ccd4a-5d5c-4d26-9d13-26d2c695f32b BootID:1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd Filesystems:[{Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:42 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:41 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:63:b0:34 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:63:b0:34 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:cd:17:35 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:b0:88:d5 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:95:de:21 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:c2:ae:a2 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:56:3a:ac:b2:3b:da Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:4e:d4:f0:92:00:c1 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.417777 4685 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.418011 4685 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.418503 4685 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.418765 4685 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.418812 4685 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.419921 4685 topology_manager.go:138] "Creating topology manager with none policy"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.419949 4685 container_manager_linux.go:303] "Creating device plugin manager"
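
The HardEvictionThresholds embedded in the nodeConfig above pair each signal with either an absolute quantity (memory.available under 100Mi) or a percentage of capacity (nodefs.available under 10%). A minimal Go sketch of how such thresholds can be evaluated against observed capacity; the threshold type and crossed method are illustrative stand-ins, not the kubelet's eviction API:

```go
package main

import "fmt"

// threshold mirrors the shape logged in nodeConfig: a signal plus
// either an absolute byte quantity or a percentage of capacity.
type threshold struct {
	signal     string
	quantity   int64   // absolute bytes; zero if percentage is used
	percentage float64 // fraction of capacity; zero if quantity is used
}

// crossed reports whether observed availability has fallen below
// the threshold, given total capacity (illustrative logic).
func (t threshold) crossed(available, capacity int64) bool {
	limit := t.quantity
	if t.percentage > 0 {
		limit = int64(t.percentage * float64(capacity))
	}
	return available < limit
}

func main() {
	memHard := threshold{signal: "memory.available", quantity: 100 << 20} // 100Mi
	nodefs := threshold{signal: "nodefs.available", percentage: 0.1}      // 10%

	fmt.Println(memHard.crossed(80<<20, 33654128640))   // true: below 100Mi
	fmt.Println(nodefs.crossed(20<<30, 85292941312))    // false: above 10% free
}
```
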
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.420673 4685 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.420707 4685 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.420913 4685 state_mem.go:36] "Initialized new in-memory state store"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.421012 4685 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.427956 4685 kubelet.go:418] "Attempting to sync node with API server"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.428008 4685 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.428089 4685 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.428114 4685 kubelet.go:324] "Adding apiserver pod source"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.428136 4685 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.434599 4685 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.436389 4685 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.438183 4685 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.439485 4685 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:20:50 crc kubenswrapper[4685]: E0128 12:20:50.439611 4685 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError"
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.439503 4685 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:20:50 crc kubenswrapper[4685]: E0128 12:20:50.439676 4685 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.456700 4685 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.456762 4685 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.456780 4685 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.456798 4685 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.456823 4685 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.456838 4685 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.456857 4685 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.456882 4685 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.456904 4685 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.456924 4685 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.456947 4685 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.456962 4685 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.466578 4685 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.467374 4685 server.go:1280] "Started kubelet"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.467990 4685 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:20:50 crc systemd[1]: Started Kubernetes Kubelet.
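
Each "Loaded volume plugin" entry above records an in-tree plugin being registered under a unique kubernetes.io/... name before the server starts. A minimal registry sketch in the same spirit; the volumePlugin and registry types are illustrative, not the kubelet's actual plugin API:

```go
package main

import "fmt"

// volumePlugin is a stand-in for the kubelet's plugin interface;
// real plugins carry mount/unmount logic, not just a name.
type volumePlugin struct{ name string }

// registry rejects duplicate names at load time, which is why each
// "Loaded volume plugin" line above names a distinct plugin.
type registry struct{ plugins map[string]volumePlugin }

func (r *registry) load(p volumePlugin) error {
	if _, dup := r.plugins[p.name]; dup {
		return fmt.Errorf("volume plugin %q registered more than once", p.name)
	}
	r.plugins[p.name] = p
	fmt.Printf("Loaded volume plugin %q\n", p.name)
	return nil
}

func main() {
	r := &registry{plugins: map[string]volumePlugin{}}
	for _, n := range []string{"kubernetes.io/empty-dir", "kubernetes.io/host-path", "kubernetes.io/csi"} {
		if err := r.load(volumePlugin{name: n}); err != nil {
			fmt.Println(err)
		}
	}
}
```
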
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.472463 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.472526 4685 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.472569 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 18:53:58.791139708 +0000 UTC
Jan 28 12:20:50 crc kubenswrapper[4685]: E0128 12:20:50.472874 4685 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.473068 4685 volume_manager.go:287] "The desired_state_of_world populator starts"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.473096 4685 volume_manager.go:289] "Starting Kubelet Volume Manager"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.473259 4685 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.480293 4685 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.481221 4685 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.482004 4685 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Jan 28 12:20:50 crc kubenswrapper[4685]: E0128 12:20:50.482579 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="200ms"
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.484262 4685 factory.go:55] Registering systemd factory
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.484310 4685 factory.go:221] Registration of the systemd container factory successfully
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.485280 4685 factory.go:153] Registering CRI-O factory
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.485305 4685 factory.go:221] Registration of the crio container factory successfully
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.485395 4685 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.485504 4685 factory.go:103] Registering Raw factory
Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.485530 4685 manager.go:1196] Started watching for new ooms in manager
Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.486099 4685 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:20:50 crc kubenswrapper[4685]: E0128 12:20:50.486262 4685 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError"
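
The lease controller above reports it will retry with interval="200ms" after the connection-refused failure. Retry loops of this kind commonly double the interval up to a cap; a minimal Go sketch of that pattern (the 7s cap is an assumption for illustration, not read from this log):

```go
package main

import (
	"fmt"
	"time"
)

// nextInterval doubles the retry interval up to a limit, the common
// shape of a "will retry" loop; the limit here is assumed.
func nextInterval(cur, limit time.Duration) time.Duration {
	next := cur * 2
	if next > limit {
		return limit
	}
	return next
}

func main() {
	interval := 200 * time.Millisecond // as logged above
	for i := 0; i < 7; i++ {
		fmt.Println("retrying in", interval)
		interval = nextInterval(interval, 7*time.Second)
	}
}
```
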
\"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.487089 4685 manager.go:319] Starting recovery of all containers Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.490939 4685 server.go:460] "Adding debug handlers to kubelet server" Jan 28 12:20:50 crc kubenswrapper[4685]: E0128 12:20:50.489971 4685 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.175:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188ee46aaf9fb5f7 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 12:20:50.467329527 +0000 UTC m=+1.554743392,LastTimestamp:2026-01-28 12:20:50.467329527 +0000 UTC m=+1.554743392,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.503545 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.503709 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.503739 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.503763 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.503786 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.503809 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.503833 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Jan 28 12:20:50 crc 
kubenswrapper[4685]: I0128 12:20:50.503857 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.503884 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.503907 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.503931 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.503954 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.503982 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504012 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504036 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504058 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504082 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504105 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504126 
4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504147 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504204 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504261 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504287 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504310 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504333 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504356 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504382 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504412 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504436 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504459 4685 
reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504485 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504508 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504533 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504558 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504580 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504603 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504626 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504648 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504672 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504698 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504722 
4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504744 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504768 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504793 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504817 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504843 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504866 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.504890 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505396 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505426 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505451 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505476 4685 reconstruct.go:130] 
"Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505509 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505536 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505567 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505595 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505622 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505648 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505674 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505699 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505723 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505748 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505772 4685 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505799 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505827 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505849 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505876 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505902 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505927 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505949 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.505976 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506007 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506032 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506057 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506082 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506105 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506130 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506157 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506269 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506304 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506332 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506357 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506381 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506497 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506526 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506553 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506581 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506610 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506637 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506662 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506690 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506715 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506738 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506761 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506786 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506811 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506839 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506863 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506890 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506919 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506946 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506971 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.506997 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.507023 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.507059 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.507088 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.507126 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.507154 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.507300 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.507339 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.507370 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.507396 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.507426 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.507453 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.507478 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.507507 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.507529 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.507554 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.507581 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.507605 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.507721 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.507749 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.512507 4685 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.512618 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.512660 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.512688 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.512715 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.512742 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.512773 4685 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.512799 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.512827 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.512854 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.512881 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.512910 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.512936 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.512961 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.512984 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513007 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513032 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513056 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual 
state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513081 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513105 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513128 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513150 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513211 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513237 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513259 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513285 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513336 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513361 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513387 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" 
volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513416 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513441 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513468 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513492 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513520 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513545 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513573 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513601 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513627 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513658 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513687 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" 
volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513714 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513742 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513766 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513793 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513822 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513848 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513874 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513899 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513924 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513949 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.513974 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" 
volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514000 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514025 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514051 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514080 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514106 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514133 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514157 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514218 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514248 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514272 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514297 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" 
volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514325 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514353 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514379 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514406 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514433 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514463 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514491 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514520 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514550 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514578 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514605 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514633 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514659 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514686 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514715 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514740 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514768 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514796 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514822 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514847 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514876 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514902 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" 
volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514929 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514955 4685 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.514983 4685 reconstruct.go:97] "Volume reconstruction finished" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.515001 4685 reconciler.go:26] "Reconciler: start to sync state" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.517560 4685 manager.go:324] Recovery completed Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.533086 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.536554 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.536630 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.536649 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.538126 4685 cpu_manager.go:225] "Starting CPU manager" policy="none" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.538157 4685 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.538232 4685 state_mem.go:36] "Initialized new in-memory state store" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.541140 4685 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.544066 4685 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.544286 4685 status_manager.go:217] "Starting to sync pod status with apiserver" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.544429 4685 kubelet.go:2335] "Starting kubelet main sync loop" Jan 28 12:20:50 crc kubenswrapper[4685]: E0128 12:20:50.544595 4685 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Jan 28 12:20:50 crc kubenswrapper[4685]: W0128 12:20:50.545942 4685 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Jan 28 12:20:50 crc kubenswrapper[4685]: E0128 12:20:50.546058 4685 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:20:50 crc kubenswrapper[4685]: E0128 12:20:50.573776 4685 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 28 12:20:50 crc kubenswrapper[4685]: E0128 12:20:50.645029 4685 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Jan 28 12:20:50 crc kubenswrapper[4685]: E0128 12:20:50.674395 4685 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 28 12:20:50 crc kubenswrapper[4685]: E0128 12:20:50.683584 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="400ms" Jan 28 12:20:50 crc kubenswrapper[4685]: E0128 12:20:50.775105 4685 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 28 12:20:50 crc kubenswrapper[4685]: E0128 12:20:50.845929 4685 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Jan 28 12:20:50 crc kubenswrapper[4685]: E0128 12:20:50.876200 4685 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.888028 4685 policy_none.go:49] "None policy: Start" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.890102 4685 memory_manager.go:170] "Starting memorymanager" policy="None" Jan 28 12:20:50 crc kubenswrapper[4685]: I0128 12:20:50.890265 4685 state_mem.go:35] "Initializing new in-memory state store" Jan 28 12:20:50 crc kubenswrapper[4685]: E0128 12:20:50.976612 4685 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 28 12:20:51 crc kubenswrapper[4685]: E0128 12:20:51.077202 4685 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 28 12:20:51 crc kubenswrapper[4685]: E0128 12:20:51.084315 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="800ms" Jan 28 12:20:51 crc kubenswrapper[4685]: E0128 12:20:51.178355 4685 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 28 12:20:51 crc kubenswrapper[4685]: E0128 12:20:51.246969 4685 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Jan 28 12:20:51 crc kubenswrapper[4685]: E0128 12:20:51.278516 4685 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 28 12:20:51 crc kubenswrapper[4685]: I0128 12:20:51.293889 4685 manager.go:334] "Starting Device Plugin manager" Jan 28 12:20:51 crc kubenswrapper[4685]: I0128 12:20:51.293964 4685 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Jan 28 12:20:51 crc kubenswrapper[4685]: I0128 12:20:51.293987 4685 server.go:79] "Starting device plugin registration server" Jan 28 12:20:51 crc kubenswrapper[4685]: I0128 12:20:51.294587 4685 eviction_manager.go:189] "Eviction manager: starting control loop" Jan 28 12:20:51 crc kubenswrapper[4685]: I0128 12:20:51.294674 4685 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Jan 28 12:20:51 crc kubenswrapper[4685]: I0128 12:20:51.294904 4685 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Jan 28 12:20:51 crc kubenswrapper[4685]: I0128 12:20:51.295090 4685 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Jan 28 12:20:51 crc kubenswrapper[4685]: I0128 12:20:51.295116 4685 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Jan 28 12:20:51 crc kubenswrapper[4685]: E0128 12:20:51.305902 4685 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 28 12:20:51 crc kubenswrapper[4685]: I0128 12:20:51.395713 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:20:51 crc kubenswrapper[4685]: I0128 12:20:51.397422 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:20:51 crc kubenswrapper[4685]: I0128 12:20:51.397503 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:20:51 crc kubenswrapper[4685]: I0128 12:20:51.397523 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:20:51 crc kubenswrapper[4685]: I0128 12:20:51.397561 4685 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 12:20:51 crc kubenswrapper[4685]: E0128 12:20:51.398212 4685 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.175:6443: connect: connection refused" node="crc" Jan 28 12:20:51 crc kubenswrapper[4685]: W0128 12:20:51.426656 4685 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Jan 28 12:20:51 crc kubenswrapper[4685]: E0128 12:20:51.426766 4685 
reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:20:51 crc kubenswrapper[4685]: W0128 12:20:51.440125 4685 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Jan 28 12:20:51 crc kubenswrapper[4685]: E0128 12:20:51.440287 4685 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:20:51 crc kubenswrapper[4685]: I0128 12:20:51.469577 4685 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Jan 28 12:20:51 crc kubenswrapper[4685]: I0128 12:20:51.473801 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 23:42:30.809292368 +0000 UTC Jan 28 12:20:51 crc kubenswrapper[4685]: I0128 12:20:51.598449 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:20:51 crc kubenswrapper[4685]: I0128 12:20:51.599779 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:20:51 crc kubenswrapper[4685]: I0128 12:20:51.599837 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:20:51 crc kubenswrapper[4685]: I0128 12:20:51.599860 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:20:51 crc kubenswrapper[4685]: I0128 12:20:51.599904 4685 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 12:20:51 crc kubenswrapper[4685]: E0128 12:20:51.600522 4685 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.175:6443: connect: connection refused" node="crc" Jan 28 12:20:51 crc kubenswrapper[4685]: E0128 12:20:51.884872 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="1.6s" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.001479 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:20:52 crc kubenswrapper[4685]: W0128 12:20:52.003094 4685 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Jan 28 12:20:52 
crc kubenswrapper[4685]: E0128 12:20:52.003268 4685 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.003573 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.003744 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.003876 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.004028 4685 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 12:20:52 crc kubenswrapper[4685]: E0128 12:20:52.004836 4685 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.175:6443: connect: connection refused" node="crc" Jan 28 12:20:52 crc kubenswrapper[4685]: W0128 12:20:52.010510 4685 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Jan 28 12:20:52 crc kubenswrapper[4685]: E0128 12:20:52.010570 4685 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.047627 4685 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.047733 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.049086 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.049137 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.049161 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.049417 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.049817 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.049894 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.050750 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.050826 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.050850 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.051007 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.051048 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.051072 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.051051 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.051207 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.051264 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.054456 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.054515 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.054467 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.054577 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.054601 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.054537 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.054822 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.054903 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.054929 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.056159 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.056247 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.056270 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.056204 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.056362 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.056377 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.056449 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.056546 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.056580 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.057438 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.057452 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.057475 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.057484 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.057493 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.057508 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.057780 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.057824 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.058994 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.059057 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.059078 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.139315 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.139409 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.139452 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.139489 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.139525 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.139559 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.139653 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.139744 4685 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.139803 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.139848 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.139890 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.139936 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.139979 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.140018 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.140090 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.241822 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.241898 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.241976 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242016 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242045 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242048 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242111 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242122 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242145 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242241 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242355 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242394 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: 
\"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242456 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242488 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242492 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242526 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242529 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242595 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242652 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242675 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242682 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:20:52 crc 
kubenswrapper[4685]: I0128 12:20:52.242718 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242652 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242778 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242806 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242834 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242872 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.242915 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.243002 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.243054 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.388322 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.396678 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.399703 4685 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 28 12:20:52 crc kubenswrapper[4685]: E0128 12:20:52.401520 4685 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.414206 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.429771 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.433933 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.469525 4685 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.474609 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 02:02:22.963432991 +0000 UTC Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.806018 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.808018 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.808092 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.808117 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:20:52 crc kubenswrapper[4685]: I0128 12:20:52.808165 4685 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 12:20:52 crc kubenswrapper[4685]: E0128 12:20:52.808959 4685 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.175:6443: connect: connection refused" node="crc" Jan 28 12:20:53 crc kubenswrapper[4685]: W0128 12:20:53.370006 4685 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Jan 28 12:20:53 crc kubenswrapper[4685]: E0128 12:20:53.370131 4685 reflector.go:158] "Unhandled Error" 
err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:20:53 crc kubenswrapper[4685]: I0128 12:20:53.469436 4685 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Jan 28 12:20:53 crc kubenswrapper[4685]: I0128 12:20:53.475525 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 21:35:52.860619578 +0000 UTC Jan 28 12:20:53 crc kubenswrapper[4685]: E0128 12:20:53.485575 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="3.2s" Jan 28 12:20:53 crc kubenswrapper[4685]: W0128 12:20:53.515544 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-05a13526d9565a8a8de4f9b11345ea68c28cba50f7a85346c1aa2ad8807415d5 WatchSource:0}: Error finding container 05a13526d9565a8a8de4f9b11345ea68c28cba50f7a85346c1aa2ad8807415d5: Status 404 returned error can't find the container with id 05a13526d9565a8a8de4f9b11345ea68c28cba50f7a85346c1aa2ad8807415d5 Jan 28 12:20:53 crc kubenswrapper[4685]: W0128 12:20:53.517032 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-033f043eb224abae5792e3e72fc96aa778f4630b833866dd813577f2d5c40774 WatchSource:0}: Error finding container 033f043eb224abae5792e3e72fc96aa778f4630b833866dd813577f2d5c40774: Status 404 returned error can't find the container with id 033f043eb224abae5792e3e72fc96aa778f4630b833866dd813577f2d5c40774 Jan 28 12:20:53 crc kubenswrapper[4685]: W0128 12:20:53.518719 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-e783c19589b2eb48366c48c95006b8c1f5079438f09188d26e5a4f0117ded9fc WatchSource:0}: Error finding container e783c19589b2eb48366c48c95006b8c1f5079438f09188d26e5a4f0117ded9fc: Status 404 returned error can't find the container with id e783c19589b2eb48366c48c95006b8c1f5079438f09188d26e5a4f0117ded9fc Jan 28 12:20:53 crc kubenswrapper[4685]: W0128 12:20:53.521323 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-35960b2b40decdd33b473ce0edf91fe73d838708d3924494b832209de04c57de WatchSource:0}: Error finding container 35960b2b40decdd33b473ce0edf91fe73d838708d3924494b832209de04c57de: Status 404 returned error can't find the container with id 35960b2b40decdd33b473ce0edf91fe73d838708d3924494b832209de04c57de Jan 28 12:20:53 crc kubenswrapper[4685]: W0128 12:20:53.522693 4685 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-897ae73c59f2bd350591d20673b9a117afccb02ecfec811d9a9b0b63df5d2fc6 WatchSource:0}: Error finding container 897ae73c59f2bd350591d20673b9a117afccb02ecfec811d9a9b0b63df5d2fc6: Status 404 returned error can't find the container with id 897ae73c59f2bd350591d20673b9a117afccb02ecfec811d9a9b0b63df5d2fc6 Jan 28 12:20:53 crc kubenswrapper[4685]: I0128 12:20:53.553113 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"e783c19589b2eb48366c48c95006b8c1f5079438f09188d26e5a4f0117ded9fc"} Jan 28 12:20:53 crc kubenswrapper[4685]: I0128 12:20:53.555653 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"033f043eb224abae5792e3e72fc96aa778f4630b833866dd813577f2d5c40774"} Jan 28 12:20:53 crc kubenswrapper[4685]: I0128 12:20:53.557575 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"35960b2b40decdd33b473ce0edf91fe73d838708d3924494b832209de04c57de"} Jan 28 12:20:53 crc kubenswrapper[4685]: I0128 12:20:53.559089 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"897ae73c59f2bd350591d20673b9a117afccb02ecfec811d9a9b0b63df5d2fc6"} Jan 28 12:20:53 crc kubenswrapper[4685]: I0128 12:20:53.560983 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"05a13526d9565a8a8de4f9b11345ea68c28cba50f7a85346c1aa2ad8807415d5"} Jan 28 12:20:53 crc kubenswrapper[4685]: W0128 12:20:53.760814 4685 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Jan 28 12:20:53 crc kubenswrapper[4685]: E0128 12:20:53.760981 4685 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Jan 28 12:20:54 crc kubenswrapper[4685]: I0128 12:20:54.409778 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:20:54 crc kubenswrapper[4685]: I0128 12:20:54.411877 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:20:54 crc kubenswrapper[4685]: I0128 12:20:54.411943 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:20:54 crc kubenswrapper[4685]: I0128 12:20:54.411960 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:20:54 crc kubenswrapper[4685]: I0128 12:20:54.412009 4685 kubelet_node_status.go:76] "Attempting to register node" node="crc" 
Jan 28 12:20:54 crc kubenswrapper[4685]: E0128 12:20:54.412872 4685 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.175:6443: connect: connection refused" node="crc"
Jan 28 12:20:54 crc kubenswrapper[4685]: I0128 12:20:54.469760 4685 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:20:54 crc kubenswrapper[4685]: I0128 12:20:54.475876 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 00:23:38.353808346 +0000 UTC
Jan 28 12:20:54 crc kubenswrapper[4685]: W0128 12:20:54.612778 4685 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:20:54 crc kubenswrapper[4685]: E0128 12:20:54.612898 4685 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError"
Jan 28 12:20:54 crc kubenswrapper[4685]: W0128 12:20:54.889239 4685 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:20:54 crc kubenswrapper[4685]: E0128 12:20:54.889366 4685 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError"
Jan 28 12:20:55 crc kubenswrapper[4685]: E0128 12:20:55.414353 4685 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.175:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188ee46aaf9fb5f7 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 12:20:50.467329527 +0000 UTC m=+1.554743392,LastTimestamp:2026-01-28 12:20:50.467329527 +0000 UTC m=+1.554743392,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 28 12:20:55 crc kubenswrapper[4685]: I0128 12:20:55.470047 4685 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:20:55 crc kubenswrapper[4685]: I0128 12:20:55.476045 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 16:07:52.095078599 +0000 UTC
Jan 28 12:20:56 crc kubenswrapper[4685]: I0128 12:20:56.469007 4685 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:20:56 crc kubenswrapper[4685]: I0128 12:20:56.477096 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 18:17:02.799049393 +0000 UTC
Jan 28 12:20:56 crc kubenswrapper[4685]: I0128 12:20:56.650129 4685 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Jan 28 12:20:56 crc kubenswrapper[4685]: E0128 12:20:56.651994 4685 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError"
Jan 28 12:20:56 crc kubenswrapper[4685]: E0128 12:20:56.686327 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="6.4s"
Jan 28 12:20:57 crc kubenswrapper[4685]: I0128 12:20:57.469938 4685 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:20:57 crc kubenswrapper[4685]: I0128 12:20:57.478068 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 07:10:09.318261742 +0000 UTC
Jan 28 12:20:57 crc kubenswrapper[4685]: I0128 12:20:57.613236 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:20:57 crc kubenswrapper[4685]: I0128 12:20:57.615220 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:20:57 crc kubenswrapper[4685]: I0128 12:20:57.615310 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:20:57 crc kubenswrapper[4685]: I0128 12:20:57.615328 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:20:57 crc kubenswrapper[4685]: I0128 12:20:57.615368 4685 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 28 12:20:57 crc kubenswrapper[4685]: E0128 12:20:57.616824 4685 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.175:6443: connect: connection refused" node="crc"
Jan 28 12:20:58 crc kubenswrapper[4685]: W0128 12:20:58.343335 4685 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:20:58 crc kubenswrapper[4685]: E0128 12:20:58.343456 4685 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError"
Jan 28 12:20:58 crc kubenswrapper[4685]: I0128 12:20:58.469215 4685 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:20:58 crc kubenswrapper[4685]: I0128 12:20:58.478486 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 19:42:23.139639028 +0000 UTC
Jan 28 12:20:58 crc kubenswrapper[4685]: W0128 12:20:58.933482 4685 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:20:58 crc kubenswrapper[4685]: E0128 12:20:58.933577 4685 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError"
Jan 28 12:20:59 crc kubenswrapper[4685]: I0128 12:20:59.469566 4685 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:20:59 crc kubenswrapper[4685]: I0128 12:20:59.478607 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 00:28:12.783475327 +0000 UTC
Jan 28 12:20:59 crc kubenswrapper[4685]: W0128 12:20:59.706004 4685 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:20:59 crc kubenswrapper[4685]: E0128 12:20:59.706136 4685 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError"
Jan 28 12:21:00 crc kubenswrapper[4685]: I0128 12:21:00.468689 4685 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:21:00 crc kubenswrapper[4685]: I0128 12:21:00.478845 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 04:49:01.694234237 +0000 UTC
Jan 28 12:21:00 crc kubenswrapper[4685]: W0128 12:21:00.953429 4685 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:21:00 crc kubenswrapper[4685]: E0128 12:21:00.953948 4685 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError"
Jan 28 12:21:01 crc kubenswrapper[4685]: E0128 12:21:01.307027 4685 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.469015 4685 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.479895 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 22:40:30.377651604 +0000 UTC
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.580924 4685 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805" exitCode=0
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.581016 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805"}
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.581057 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.583365 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.583443 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.583464 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.584139 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07"}
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.586325 4685 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="707cd19a9ec0896bb1c0ff4bc44701a5ae7a0b8f5ef0e94505e3ef7c151fe018" exitCode=0
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.586442 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"707cd19a9ec0896bb1c0ff4bc44701a5ae7a0b8f5ef0e94505e3ef7c151fe018"}
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.586464 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.588053 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.588114 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.588129 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.588999 4685 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="0dbc5b6b55278dfd36841f37e2e6eaaff502fc0d660a0c1ad1e31a2d1c0305ef" exitCode=0
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.589051 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"0dbc5b6b55278dfd36841f37e2e6eaaff502fc0d660a0c1ad1e31a2d1c0305ef"}
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.589064 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.590922 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.590966 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.590992 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.591100 4685 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37" exitCode=0
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.591129 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37"}
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.591260 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.592234 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.592285 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.592300 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.596167 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.596920 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.596948 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:01 crc kubenswrapper[4685]: I0128 12:21:01.596960 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:02 crc kubenswrapper[4685]: I0128 12:21:02.469534 4685 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:21:02 crc kubenswrapper[4685]: I0128 12:21:02.480829 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 00:46:52.692923745 +0000 UTC
Jan 28 12:21:03 crc kubenswrapper[4685]: E0128 12:21:03.088096 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="7s"
Jan 28 12:21:03 crc kubenswrapper[4685]: I0128 12:21:03.469667 4685 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:21:03 crc kubenswrapper[4685]: I0128 12:21:03.482158 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 01:46:47.537708538 +0000 UTC
Jan 28 12:21:03 crc kubenswrapper[4685]: I0128 12:21:03.602677 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991"}
Jan 28 12:21:03 crc kubenswrapper[4685]: I0128 12:21:03.605482 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"7d39b1a67b3e35f25fd74df829ea3e77dbf70af50eec20396256657b7d7843f6"}
Jan 28 12:21:03 crc kubenswrapper[4685]: I0128 12:21:03.607723 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030"}
Jan 28 12:21:03 crc kubenswrapper[4685]: I0128 12:21:03.611538 4685 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="2f7ec2bff084ad4173069e4a8c11a93983308daa705ebbbdd979e28fc41c6196" exitCode=0
Jan 28 12:21:03 crc kubenswrapper[4685]: I0128 12:21:03.611585 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"2f7ec2bff084ad4173069e4a8c11a93983308daa705ebbbdd979e28fc41c6196"}
Jan 28 12:21:03 crc kubenswrapper[4685]: I0128 12:21:03.611805 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:03 crc kubenswrapper[4685]: I0128 12:21:03.614205 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:03 crc kubenswrapper[4685]: I0128 12:21:03.614274 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:03 crc kubenswrapper[4685]: I0128 12:21:03.614292 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:03 crc kubenswrapper[4685]: I0128 12:21:03.615354 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"bdd096013bc79dbef2af62d85f0bde6bb4227ef5f1e4b19e8d54f46c56aa8e28"}
Jan 28 12:21:03 crc kubenswrapper[4685]: I0128 12:21:03.615424 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:03 crc kubenswrapper[4685]: I0128 12:21:03.616543 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:03 crc kubenswrapper[4685]: I0128 12:21:03.616573 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:03 crc kubenswrapper[4685]: I0128 12:21:03.616585 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.017770 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.019916 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.019962 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.019976 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.020004 4685 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 28 12:21:04 crc kubenswrapper[4685]: E0128 12:21:04.020608 4685 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.175:6443: connect: connection refused" node="crc"
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.469466 4685 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.482626 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 19:58:46.423706722 +0000 UTC
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.619886 4685 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="dbf6f7c45ccc028d14bf3374bb6e440aa12818605d15c7a3b93397fdeb53c30d" exitCode=0
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.619943 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"dbf6f7c45ccc028d14bf3374bb6e440aa12818605d15c7a3b93397fdeb53c30d"}
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.620075 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.620910 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.620946 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.620959 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.623640 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339"}
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.623672 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e"}
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.626627 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"dae76144067f7839771549d4c80cd4b908d3d2e64b73655c5ee92749af7a3a3d"}
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.626679 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"4d6ee7dca5cc5555e11d6e2550d13cd6386cbf84f722d6783cb9bb108ad60401"}
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.629896 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937"}
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.629942 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0"}
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.629947 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.629994 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.631122 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.631162 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.631192 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.631272 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.631295 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.631305 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:04 crc kubenswrapper[4685]: I0128 12:21:04.660790 4685 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Jan 28 12:21:04 crc kubenswrapper[4685]: E0128 12:21:04.662063 4685 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError"
Jan 28 12:21:05 crc kubenswrapper[4685]: E0128 12:21:05.415573 4685 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.175:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188ee46aaf9fb5f7 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 12:20:50.467329527 +0000 UTC m=+1.554743392,LastTimestamp:2026-01-28 12:20:50.467329527 +0000 UTC m=+1.554743392,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 28 12:21:05 crc kubenswrapper[4685]: I0128 12:21:05.468998 4685 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:21:05 crc kubenswrapper[4685]: I0128 12:21:05.483030 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 23:00:01.708360362 +0000 UTC
Jan 28 12:21:05 crc kubenswrapper[4685]: I0128 12:21:05.635262 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c1511d269060050c0b52ac4c26ed87b92e0bc3e05c829792757213d068a293fd"}
Jan 28 12:21:05 crc kubenswrapper[4685]: I0128 12:21:05.635318 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e"}
Jan 28 12:21:05 crc kubenswrapper[4685]: I0128 12:21:05.635454 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:05 crc kubenswrapper[4685]: I0128 12:21:05.636387 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:05 crc kubenswrapper[4685]: I0128 12:21:05.636418 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:05 crc kubenswrapper[4685]: I0128 12:21:05.636429 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:05 crc kubenswrapper[4685]: I0128 12:21:05.641099 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"2c15c18d98df8152f59d88f29d4b306cbb786ae3a3dd8c6ade47f5cbecfc74de"}
Jan 28 12:21:05 crc kubenswrapper[4685]: I0128 12:21:05.641135 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e5910cb1f1b1756c9d54ec3d263c75511b0ad76b8c8f0c3fa28db37876832aea"}
Jan 28 12:21:05 crc kubenswrapper[4685]: I0128 12:21:05.641150 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"2c33615ba6528105b335fe59f781c0f2921f7c4d0069d8f05812f7415da6a117"}
Jan 28 12:21:05 crc kubenswrapper[4685]: I0128 12:21:05.641226 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:05 crc kubenswrapper[4685]: I0128 12:21:05.641322 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:05 crc kubenswrapper[4685]: I0128 12:21:05.642400 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:05 crc kubenswrapper[4685]: I0128 12:21:05.642422 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:05 crc kubenswrapper[4685]: I0128 12:21:05.642488 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:05 crc kubenswrapper[4685]: I0128 12:21:05.642504 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:05 crc kubenswrapper[4685]: I0128 12:21:05.642454 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:05 crc kubenswrapper[4685]: I0128 12:21:05.642547 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.469826 4685 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.483131 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 00:28:13.899128257 +0000 UTC
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.646664 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.649527 4685 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c1511d269060050c0b52ac4c26ed87b92e0bc3e05c829792757213d068a293fd" exitCode=255
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.649605 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"c1511d269060050c0b52ac4c26ed87b92e0bc3e05c829792757213d068a293fd"}
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.649729 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.651353 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.651383 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.651397 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.651974 4685 scope.go:117] "RemoveContainer" containerID="c1511d269060050c0b52ac4c26ed87b92e0bc3e05c829792757213d068a293fd"
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.655804 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"37c4a783f533d9621d167344d2536540a5bd634ae66246381b36cd7140cf292b"}
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.655853 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.655890 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.655858 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b7b768ca2a7ddcec3cfc3d802d1e320499323d7375ee0260bbe4029c385700f0"}
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.656006 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.656867 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.656923 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.656937 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.657254 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.657313 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.657332 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.762680 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.762871 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.764036 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.764081 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:06 crc kubenswrapper[4685]: I0128 12:21:06.764097 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.483302 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 19:12:34.670183991 +0000 UTC
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.659395 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.661196 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311"}
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.661320 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.661377 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.661444 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.661387 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.662207 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.662237 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.662248 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.662595 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.662618 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.662629 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.662716 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.662741 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.662753 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.901479 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.901790 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.904161 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.904329 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.904360 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:07 crc kubenswrapper[4685]: I0128 12:21:07.924128 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 28 12:21:08 crc kubenswrapper[4685]: I0128 12:21:08.484015 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 16:47:41.819991013 +0000 UTC
Jan 28 12:21:08 crc kubenswrapper[4685]: I0128 12:21:08.649453 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 12:21:08 crc kubenswrapper[4685]: I0128 12:21:08.665760 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:08 crc kubenswrapper[4685]: I0128 12:21:08.665805 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:08 crc kubenswrapper[4685]: I0128 12:21:08.665971 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 12:21:08 crc kubenswrapper[4685]: I0128 12:21:08.667355 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:08 crc kubenswrapper[4685]: I0128 12:21:08.667388 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:08 crc kubenswrapper[4685]: I0128 12:21:08.667400 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:08 crc kubenswrapper[4685]: I0128 12:21:08.667385 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:08 crc kubenswrapper[4685]: I0128 12:21:08.667467 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:08 crc kubenswrapper[4685]: I0128 12:21:08.667491 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:09 crc kubenswrapper[4685]: I0128 12:21:09.284352 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 12:21:09 crc kubenswrapper[4685]: I0128 12:21:09.484871 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 19:26:27.908526052 +0000 UTC
Jan 28 12:21:09 crc kubenswrapper[4685]: I0128 12:21:09.668698 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:09 crc kubenswrapper[4685]: I0128 12:21:09.670022 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:09 crc kubenswrapper[4685]: I0128 12:21:09.670093 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:09 crc kubenswrapper[4685]: I0128 12:21:09.670120 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:10 crc kubenswrapper[4685]: I0128 12:21:10.490442 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 19:29:38.51040831 +0000 UTC
Jan 28 12:21:10 crc kubenswrapper[4685]: I0128 12:21:10.645433 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Jan 28 12:21:10 crc kubenswrapper[4685]: I0128 12:21:10.645748 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:10 crc kubenswrapper[4685]: I0128 12:21:10.647136 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:10 crc kubenswrapper[4685]: I0128 12:21:10.647230 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:10 crc kubenswrapper[4685]: I0128 12:21:10.647266 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:10 crc kubenswrapper[4685]: I0128 12:21:10.671802 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:10 crc kubenswrapper[4685]: I0128 12:21:10.673196 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:10 crc kubenswrapper[4685]: I0128 12:21:10.673259 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:10 crc kubenswrapper[4685]: I0128 12:21:10.673275 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:11 crc kubenswrapper[4685]: I0128 12:21:11.021412 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:11 crc kubenswrapper[4685]: I0128 12:21:11.022993 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:11 crc kubenswrapper[4685]: I0128 12:21:11.023039 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:11 crc kubenswrapper[4685]: I0128 12:21:11.023054 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:11 crc kubenswrapper[4685]: I0128 12:21:11.023084 4685 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 28 12:21:11 crc kubenswrapper[4685]: I0128 12:21:11.075161 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Jan 28 12:21:11 crc kubenswrapper[4685]: I0128 12:21:11.075510 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:11 crc kubenswrapper[4685]: I0128 12:21:11.077097 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:11 crc kubenswrapper[4685]: I0128 12:21:11.077222 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:11 crc kubenswrapper[4685]: I0128 12:21:11.077256 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:11 crc kubenswrapper[4685]: E0128 12:21:11.308045 4685 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Jan 28 12:21:11 crc kubenswrapper[4685]: I0128 12:21:11.491521 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 21:51:05.35168737 +0000 UTC
Jan 28 12:21:12 crc kubenswrapper[4685]: I0128 12:21:12.275593 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 28 12:21:12 crc kubenswrapper[4685]: I0128 12:21:12.275768 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:12 crc kubenswrapper[4685]: I0128 12:21:12.278098 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:12 crc kubenswrapper[4685]: I0128 12:21:12.278119 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:12 crc kubenswrapper[4685]: I0128 12:21:12.278128 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:12 crc kubenswrapper[4685]: I0128 12:21:12.281759 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 28 12:21:12 crc kubenswrapper[4685]: I0128 12:21:12.491929 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 03:36:51.124834386 +0000 UTC
Jan 28 12:21:12 crc kubenswrapper[4685]: I0128 12:21:12.677903 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:12 crc kubenswrapper[4685]: I0128 12:21:12.679161 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:12 crc kubenswrapper[4685]: I0128 12:21:12.679222 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:12 crc kubenswrapper[4685]: I0128 12:21:12.679235 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:13 crc kubenswrapper[4685]: I0128 12:21:13.492372 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 04:28:08.598442148 +0000 UTC
Jan 28 12:21:14 crc kubenswrapper[4685]: I0128 12:21:14.360591 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 28 12:21:14 crc kubenswrapper[4685]: I0128 12:21:14.360787 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:14 crc kubenswrapper[4685]: I0128 12:21:14.362058 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:14 crc kubenswrapper[4685]: I0128 12:21:14.362137 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:14 crc kubenswrapper[4685]: I0128 12:21:14.362156 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:14 crc kubenswrapper[4685]: I0128 12:21:14.493159 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 02:42:58.34393379 +0000 UTC
Jan 28 12:21:15 crc kubenswrapper[4685]: I0128 12:21:15.275863 4685 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 12:21:15 crc kubenswrapper[4685]: I0128 12:21:15.276020 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 12:21:15 crc kubenswrapper[4685]: I0128 12:21:15.494318 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 10:54:04.967895986 +0000 UTC
Jan 28 12:21:16 crc kubenswrapper[4685]: I0128 12:21:16.495029 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 21:37:14.689233084 +0000 UTC
Jan 28 12:21:17 crc kubenswrapper[4685]: W0128 12:21:17.227789 4685 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout
Jan 28 12:21:17 crc kubenswrapper[4685]: I0128 12:21:17.227921 4685 trace.go:236] Trace[2032443724]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Jan-2026 12:21:07.226) (total time: 10001ms):
Jan 28 12:21:17 crc kubenswrapper[4685]: Trace[2032443724]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (12:21:17.227)
Jan 28 12:21:17 crc kubenswrapper[4685]: Trace[2032443724]: [10.001308827s] [10.001308827s] END
Jan 28 12:21:17 crc kubenswrapper[4685]: E0128 12:21:17.227961 4685 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Jan 28 12:21:17 crc kubenswrapper[4685]: W0128 12:21:17.414122 4685 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout
Jan 28 12:21:17 crc kubenswrapper[4685]: I0128 12:21:17.414284 4685 trace.go:236] Trace[629101717]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Jan-2026 12:21:07.412) (total time: 10001ms):
Jan 28 12:21:17 crc kubenswrapper[4685]: Trace[629101717]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (12:21:17.414)
Jan 28 12:21:17 crc kubenswrapper[4685]: Trace[629101717]: [10.00143898s] [10.00143898s] END
Jan 28 12:21:17 crc kubenswrapper[4685]: E0128 12:21:17.414313 4685 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Jan 28 12:21:17 crc kubenswrapper[4685]: I0128 12:21:17.470691 4685 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout
Jan 28 12:21:17 crc kubenswrapper[4685]: I0128 12:21:17.496139 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 16:00:51.5520007 +0000 UTC
Jan 28 12:21:17 crc kubenswrapper[4685]: I0128 12:21:17.722721 4685 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Jan 28 12:21:17 crc kubenswrapper[4685]: I0128 12:21:17.722822 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Jan 28 12:21:17 crc kubenswrapper[4685]: I0128 12:21:17.727504 4685 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Jan 28 12:21:17 crc kubenswrapper[4685]: I0128 12:21:17.727591 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Jan 28 12:21:18 crc kubenswrapper[4685]: I0128 12:21:18.497260 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 07:31:17.836007496 +0000 UTC
Jan 28 12:21:18 crc kubenswrapper[4685]: I0128 12:21:18.655829 4685 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]log ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]etcd ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/start-apiserver-admission-initializer ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/openshift.io-api-request-count-filter ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/openshift.io-startkubeinformers ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/generic-apiserver-start-informers ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/priority-and-fairness-config-consumer ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/priority-and-fairness-filter ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/storage-object-count-tracker-hook ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/start-apiextensions-informers ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/start-apiextensions-controllers ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/crd-informer-synced ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/start-system-namespaces-controller ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/start-cluster-authentication-info-controller ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/start-legacy-token-tracking-controller ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/start-service-ip-repair-controllers ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld
Jan 28 12:21:18 crc kubenswrapper[4685]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/priority-and-fairness-config-producer ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/bootstrap-controller ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/aggregator-reload-proxy-client-cert ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/start-kube-aggregator-informers ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/apiservice-status-local-available-controller ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/apiservice-status-remote-available-controller ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/apiservice-registration-controller ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/apiservice-wait-for-first-sync ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/apiservice-discovery-controller ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/kube-apiserver-autoregistration ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]autoregister-completion ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/apiservice-openapi-controller ok
Jan 28 12:21:18 crc kubenswrapper[4685]: [+]poststarthook/apiservice-openapiv3-controller ok
Jan 28 12:21:18 crc kubenswrapper[4685]: livez check failed
Jan 28 12:21:18 crc kubenswrapper[4685]: I0128 12:21:18.657416 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:21:19 crc kubenswrapper[4685]: I0128 12:21:19.497777 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 00:58:40.349633906 +0000 UTC
Jan 28 12:21:20 crc kubenswrapper[4685]: I0128 12:21:20.498593 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 20:33:52.717903994 +0000 UTC
Jan 28 12:21:21 crc kubenswrapper[4685]: I0128 12:21:21.140236 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Jan 28 12:21:21 crc kubenswrapper[4685]: I0128 12:21:21.140522 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:21 crc kubenswrapper[4685]: I0128 12:21:21.141753 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:21 crc kubenswrapper[4685]: I0128 12:21:21.141801 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:21 crc kubenswrapper[4685]: I0128 12:21:21.141812 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:21 crc kubenswrapper[4685]: I0128 12:21:21.158541 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Jan 28 12:21:21 crc kubenswrapper[4685]: E0128 12:21:21.308224 4685 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Jan 28 12:21:21 crc kubenswrapper[4685]: I0128 12:21:21.499446 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 06:13:36.602326148 +0000 UTC
Jan 28 12:21:21 crc kubenswrapper[4685]: I0128 12:21:21.703122 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 12:21:21 crc kubenswrapper[4685]: I0128 12:21:21.704472 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:21 crc kubenswrapper[4685]: I0128 12:21:21.704520 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:21 crc kubenswrapper[4685]: I0128 12:21:21.704531 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:22 crc kubenswrapper[4685]: I0128 12:21:22.117154 4685 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Jan 28 12:21:22 crc kubenswrapper[4685]: I0128 12:21:22.137112 4685 reflector.go:368]
Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 28 12:21:22 crc kubenswrapper[4685]: I0128 12:21:22.160091 4685 csr.go:261] certificate signing request csr-pxg2r is approved, waiting to be issued Jan 28 12:21:22 crc kubenswrapper[4685]: I0128 12:21:22.500127 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 11:35:44.811077475 +0000 UTC Jan 28 12:21:22 crc kubenswrapper[4685]: E0128 12:21:22.713133 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="7s" Jan 28 12:21:22 crc kubenswrapper[4685]: E0128 12:21:22.717343 4685 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Jan 28 12:21:22 crc kubenswrapper[4685]: I0128 12:21:22.822837 4685 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Jan 28 12:21:22 crc kubenswrapper[4685]: I0128 12:21:22.823290 4685 trace.go:236] Trace[1996327689]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Jan-2026 12:21:08.723) (total time: 14099ms): Jan 28 12:21:22 crc kubenswrapper[4685]: Trace[1996327689]: ---"Objects listed" error: 14099ms (12:21:22.823) Jan 28 12:21:22 crc kubenswrapper[4685]: Trace[1996327689]: [14.099598019s] [14.099598019s] END Jan 28 12:21:22 crc kubenswrapper[4685]: I0128 12:21:22.823349 4685 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 28 12:21:22 crc kubenswrapper[4685]: I0128 12:21:22.823758 4685 trace.go:236] Trace[1786503936]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Jan-2026 12:21:11.998) (total time: 10825ms): Jan 28 12:21:22 crc kubenswrapper[4685]: Trace[1786503936]: ---"Objects listed" error: 10824ms (12:21:22.823) Jan 28 12:21:22 crc kubenswrapper[4685]: Trace[1786503936]: [10.825142565s] [10.825142565s] END Jan 28 12:21:22 crc kubenswrapper[4685]: I0128 12:21:22.823800 4685 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 28 12:21:22 crc kubenswrapper[4685]: I0128 12:21:22.840069 4685 csr.go:257] certificate signing request csr-pxg2r is issued Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.175040 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.179259 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.447503 4685 apiserver.go:52] "Watching apiserver" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.451697 4685 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.452968 4685 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"] Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.453455 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.453539 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.453693 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:23 crc kubenswrapper[4685]: E0128 12:21:23.453831 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:21:23 crc kubenswrapper[4685]: E0128 12:21:23.453905 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.453985 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.454111 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.454127 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 12:21:23 crc kubenswrapper[4685]: E0128 12:21:23.454204 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.457293 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.457316 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.457331 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.457357 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.457489 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.457534 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.457539 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.457572 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.458480 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.470663 4685 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.470704 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.481691 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.483025 4685 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.494259 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.501722 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 11:29:54.209845944 +0000 UTC Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.508641 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.519612 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.534941 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.534996 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535021 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535043 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535063 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535082 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535101 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 28 12:21:23 crc 
kubenswrapper[4685]: I0128 12:21:23.535118 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535142 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535161 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535201 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535233 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535253 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535273 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535324 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535345 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535364 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: 
\"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535385 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535406 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535423 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535430 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535454 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535477 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535431 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535477 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535497 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535583 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535614 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535618 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535676 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535696 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535716 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535720 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535732 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535753 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535752 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535764 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535774 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535838 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535859 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535859 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535878 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535931 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535960 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.535985 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536004 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536004 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536027 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536047 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536055 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536066 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536121 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536149 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536129 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536199 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536233 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536254 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536187 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536280 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536306 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536328 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536327 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536353 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536377 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536445 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536463 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536488 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536503 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536522 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536511 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536562 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536596 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536617 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536629 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). 
InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536634 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536674 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536700 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536723 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536744 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536765 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536778 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536789 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536813 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536834 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536855 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536859 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536875 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536897 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536918 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536942 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536966 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.536986 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537007 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537028 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537026 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537049 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537069 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537071 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537074 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537089 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537120 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537123 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537138 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537158 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537202 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537228 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537253 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537277 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537288 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537295 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537317 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537335 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537351 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537368 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537370 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537381 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537598 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537627 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). 
InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537383 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537678 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537749 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537774 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537796 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537821 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537846 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537868 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537889 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537910 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod 
\"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537938 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537959 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537981 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538004 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538025 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538046 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538068 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538090 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538114 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538146 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538186 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538208 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538230 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538251 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538271 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538292 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538312 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538334 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538354 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538373 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538393 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538415 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538436 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538455 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538477 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538498 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538521 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538541 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538564 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538585 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: 
\"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538606 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538628 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538649 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538670 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538690 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538710 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538733 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538754 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538776 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538796 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538816 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538837 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538856 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538879 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538900 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538923 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538944 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538966 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538989 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539009 4685 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539053 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539079 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539101 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539123 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539144 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539195 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539218 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539242 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539285 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539306 4685 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539328 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539352 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539375 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539395 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539418 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539439 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539461 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539482 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539510 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539532 4685 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539554 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539576 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539601 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539623 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539645 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537679 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537788 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537900 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.537924 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538065 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538076 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538227 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538374 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538566 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.538752 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539133 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540477 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539253 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539340 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539399 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539539 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540509 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539561 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539642 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: E0128 12:21:23.539682 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:21:24.039660228 +0000 UTC m=+35.127074073 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540604 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540647 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540679 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540706 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540720 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540738 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540760 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540782 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540802 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540820 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540836 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540839 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540887 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540914 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540937 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540960 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540972 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540982 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.541027 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.541052 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.541070 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.541078 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.541119 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.541141 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.541163 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.541199 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.541245 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.541270 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.541291 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.541310 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.541334 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.541337 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.541403 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.541607 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539787 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539841 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539844 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.539861 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540035 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540130 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540265 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540271 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540363 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.540550 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.541743 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.541757 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.542022 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.542110 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.542194 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.542424 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.542442 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.542449 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.542450 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.542485 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.542617 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.542872 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.542886 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.543035 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.543118 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.543136 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.543121 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.543342 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.543425 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.543476 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.543647 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.543713 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.543752 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.543763 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.543770 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.543757 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.543813 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.544388 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.544397 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.544407 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.544503 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.544641 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.544777 4685 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.545217 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.545259 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.545424 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.545675 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.545689 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.545964 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.546011 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.546307 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.546388 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.546517 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.546596 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.546666 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.546693 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.546821 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.546999 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.547017 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.547080 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.547205 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.547583 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.547643 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.547827 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.547851 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.548246 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.548274 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.548680 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.548727 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.548745 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.549103 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.549113 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.549404 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.549410 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.549454 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.549755 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.549787 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.549850 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.549930 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.550152 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.550572 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.551502 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.552019 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.552066 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.552117 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.552326 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.552435 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.552554 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.552674 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.552945 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.553114 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.553336 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.553455 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.553522 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.553526 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.554095 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.553963 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.554348 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.554445 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.554484 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). 
InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.554791 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.554804 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.555019 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.555348 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.555360 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.555402 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.555412 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.555610 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.555614 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.555844 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.556344 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.556412 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.556453 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.556606 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.556703 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.557015 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.557239 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.557245 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.557663 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.557884 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.541356 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.557954 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.557982 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558003 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558031 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558051 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558068 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558086 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558113 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558154 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558276 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558289 4685 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558298 4685 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558309 4685 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558319 4685 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558328 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558337 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558346 4685 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558354 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558364 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558373 4685 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558382 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558390 4685 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558397 4685 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558405 4685 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558414 4685 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558423 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558431 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558439 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558447 4685 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558455 4685 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558463 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558472 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558468 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558481 4685 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558489 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558499 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558507 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558515 4685 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558523 4685 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558539 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558548 4685 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558557 4685 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558567 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558577 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558586 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: 
\"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558595 4685 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558603 4685 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558611 4685 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558620 4685 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558631 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558639 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558649 4685 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558658 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558667 4685 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558675 4685 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558683 4685 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558691 4685 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558699 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") 
on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558708 4685 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558716 4685 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558724 4685 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558732 4685 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558741 4685 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558758 4685 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558768 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558777 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558786 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558794 4685 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558803 4685 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558812 4685 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558820 4685 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 
12:21:23.558828 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558839 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558847 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558856 4685 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558865 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558873 4685 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558881 4685 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558891 4685 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558901 4685 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558909 4685 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558917 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558925 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558934 4685 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558942 4685 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558950 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558958 4685 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558967 4685 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558975 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558983 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558991 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558999 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.559006 4685 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.559014 4685 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.559022 4685 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.559030 4685 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.559039 4685 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.559047 4685 
reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.559055 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.559065 4685 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.559074 4685 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.559083 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.559093 4685 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.559101 4685 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.559109 4685 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.559119 4685 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.559128 4685 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.559136 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560410 4685 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560427 4685 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc 
kubenswrapper[4685]: I0128 12:21:23.560436 4685 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560445 4685 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560454 4685 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560463 4685 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560471 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560480 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560488 4685 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560496 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560504 4685 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560513 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560522 4685 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560530 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560539 4685 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560547 4685 
reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560557 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560565 4685 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560573 4685 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560582 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560590 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560598 4685 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560606 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560615 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560623 4685 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560632 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560641 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560651 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 
crc kubenswrapper[4685]: I0128 12:21:23.560659 4685 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560668 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560677 4685 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560685 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560693 4685 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560702 4685 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560709 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560718 4685 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560727 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560734 4685 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560742 4685 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560750 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560760 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560772 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560782 4685 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560792 4685 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560804 4685 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560814 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560824 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560832 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560841 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560849 4685 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560857 4685 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560864 4685 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560872 4685 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560879 4685 
reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560887 4685 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560895 4685 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560903 4685 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560900 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558606 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.558936 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560037 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: E0128 12:21:23.560748 4685 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:21:23 crc kubenswrapper[4685]: E0128 12:21:23.561020 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:24.061004856 +0000 UTC m=+35.148418691 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560663 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.560911 4685 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.561063 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.561071 4685 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.561080 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.561089 4685 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.561098 4685 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.561106 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.561115 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.561124 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.561135 4685 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.561145 4685 reconciler_common.go:293] "Volume detached for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.561156 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.561185 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.561196 4685 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.561205 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.561214 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.561223 4685 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.561232 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.561241 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.561250 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: E0128 12:21:23.560942 4685 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:21:23 crc kubenswrapper[4685]: E0128 12:21:23.561287 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:24.061280273 +0000 UTC m=+35.148694108 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.561572 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.562135 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.562418 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.562576 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.556133 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.567197 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.569786 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.570276 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.570490 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.572510 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: E0128 12:21:23.582050 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:21:23 crc kubenswrapper[4685]: E0128 12:21:23.582083 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:21:23 crc kubenswrapper[4685]: E0128 12:21:23.582097 4685 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:21:23 crc kubenswrapper[4685]: E0128 12:21:23.582196 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:24.08215257 +0000 UTC m=+35.169566405 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.583638 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.586998 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.587766 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 12:21:23 crc kubenswrapper[4685]: E0128 12:21:23.588521 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:21:23 crc kubenswrapper[4685]: E0128 12:21:23.588627 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:21:23 crc kubenswrapper[4685]: E0128 12:21:23.588704 4685 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:21:23 crc kubenswrapper[4685]: E0128 12:21:23.588814 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:24.088793291 +0000 UTC m=+35.176207126 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.591750 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.819051 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.820623 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.820679 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.820953 4685 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.820980 4685 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.820993 4685 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.821006 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.821021 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.821020 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: 
\"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.821032 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.821245 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.821617 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.821648 4685 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.821663 4685 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.821700 4685 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.821711 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.821720 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.821731 4685 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.821745 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.821772 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.822623 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: 
\"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.828783 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.830105 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.835896 4685 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311" exitCode=255 Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.836647 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311"} Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.836711 4685 scope.go:117] "RemoveContainer" containerID="c1511d269060050c0b52ac4c26ed87b92e0bc3e05c829792757213d068a293fd" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.837787 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.839826 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.841381 4685 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-28 12:16:22 +0000 UTC, rotation deadline is 2026-12-14 06:06:42.434831927 +0000 UTC Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.841434 4685 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 7673h45m18.593400522s for next certificate rotation Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.843671 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.851568 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\
\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.852057 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: E0128 12:21:23.854939 4685 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.857848 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.869340 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.881728 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.903836 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.922133 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.922167 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.922192 4685 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.922201 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.924356 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.937530 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.941533 4685 scope.go:117] "RemoveContainer" containerID="54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311" Jan 28 12:21:23 crc kubenswrapper[4685]: E0128 12:21:23.941733 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.942386 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.960880 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.981135 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:23 crc kubenswrapper[4685]: I0128 12:21:23.994354 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.003862 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.012569 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.065086 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.074189 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 12:21:24 crc kubenswrapper[4685]: W0128 12:21:24.078275 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-d901c8fa253d3020ac9d33597953f4361a61072b12d4e86fb0d67d9858078b31 WatchSource:0}: Error finding container d901c8fa253d3020ac9d33597953f4361a61072b12d4e86fb0d67d9858078b31: Status 404 returned error can't find the container with id d901c8fa253d3020ac9d33597953f4361a61072b12d4e86fb0d67d9858078b31 Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.080350 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.123348 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.123490 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:24 crc kubenswrapper[4685]: E0128 12:21:24.123571 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:21:25.123513767 +0000 UTC m=+36.210927732 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:21:24 crc kubenswrapper[4685]: E0128 12:21:24.123629 4685 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.123638 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.123692 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:24 crc kubenswrapper[4685]: E0128 12:21:24.123720 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:25.123707052 +0000 UTC m=+36.211120887 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.123787 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:24 crc kubenswrapper[4685]: E0128 12:21:24.123798 4685 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:21:24 crc kubenswrapper[4685]: E0128 12:21:24.123816 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:21:24 crc kubenswrapper[4685]: E0128 12:21:24.123835 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:21:24 crc kubenswrapper[4685]: E0128 12:21:24.123848 4685 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:21:24 crc kubenswrapper[4685]: E0128 12:21:24.123881 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:25.123870966 +0000 UTC m=+36.211284971 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:21:24 crc kubenswrapper[4685]: E0128 12:21:24.123905 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:25.123892206 +0000 UTC m=+36.211306271 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:21:24 crc kubenswrapper[4685]: E0128 12:21:24.123956 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:21:24 crc kubenswrapper[4685]: E0128 12:21:24.123975 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:21:24 crc kubenswrapper[4685]: E0128 12:21:24.123985 4685 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:21:24 crc kubenswrapper[4685]: E0128 12:21:24.124037 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:25.12402498 +0000 UTC m=+36.211438815 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.290560 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.303500 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.316960 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.329105 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.358026 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.368627 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.378843 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1511d269060050c0b52ac4c26ed87b92e0bc3e05c829792757213d068a293fd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:06Z\\\",\\\"message\\\":\\\"W0128 12:21:05.673708 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
12:21:05.674049 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769602865 cert, and key in /tmp/serving-cert-2924620708/serving-signer.crt, /tmp/serving-cert-2924620708/serving-signer.key\\\\nI0128 12:21:06.060337 1 observer_polling.go:159] Starting file observer\\\\nW0128 12:21:06.064500 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 12:21:06.064902 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:06.069442 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2924620708/tls.crt::/tmp/serving-cert-2924620708/tls.key\\\\\\\"\\\\nF0128 12:21:06.426660 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:05Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.389124 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.398666 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.502680 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 15:33:46.35276644 +0000 UTC Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.549964 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.550676 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.551634 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.552327 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.553006 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.553626 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.554448 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.555090 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.555876 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Jan 28 12:21:24 crc 
kubenswrapper[4685]: I0128 12:21:24.556617 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.557282 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.558058 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.558872 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.559532 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.560080 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.561698 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.562311 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.562949 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.563775 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.564458 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.564977 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.565689 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.567481 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.568926 4685 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.570763 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.571823 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.573257 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.573845 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.574586 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.575630 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.576231 4685 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.576364 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.578879 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.579614 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.580060 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.582069 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.582750 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.583256 4685 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.584397 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.585496 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.586017 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.586661 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.587313 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.587893 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.588363 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.592279 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.593791 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.595443 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.596120 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.597201 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.597823 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.598706 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.600015 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.600706 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.839962 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"d901c8fa253d3020ac9d33597953f4361a61072b12d4e86fb0d67d9858078b31"} Jan 28 12:21:24 crc kubenswrapper[4685]: E0128 12:21:24.849094 4685 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-apiserver-crc\" already exists" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.849407 4685 scope.go:117] "RemoveContainer" containerID="54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311" Jan 28 12:21:24 crc kubenswrapper[4685]: E0128 12:21:24.849626 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.857381 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.874019 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.887264 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.911307 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.936388 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:24 crc kubenswrapper[4685]: I0128 12:21:24.954494 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.019082 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.048451 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.102575 4685 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.131067 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:21:25 crc kubenswrapper[4685]: E0128 12:21:25.131249 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:21:27.131220141 +0000 UTC m=+38.218633976 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.131449 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.131524 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.131604 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.131678 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:25 crc kubenswrapper[4685]: E0128 12:21:25.131621 4685 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:21:25 crc kubenswrapper[4685]: E0128 12:21:25.131684 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:21:25 crc kubenswrapper[4685]: E0128 12:21:25.131907 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:21:25 crc kubenswrapper[4685]: E0128 12:21:25.131982 4685 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:21:25 crc kubenswrapper[4685]: E0128 12:21:25.131707 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:21:25 crc 
kubenswrapper[4685]: E0128 12:21:25.132083 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:21:25 crc kubenswrapper[4685]: E0128 12:21:25.132096 4685 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:21:25 crc kubenswrapper[4685]: E0128 12:21:25.131720 4685 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:21:25 crc kubenswrapper[4685]: E0128 12:21:25.131863 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:27.131829817 +0000 UTC m=+38.219243652 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:21:25 crc kubenswrapper[4685]: E0128 12:21:25.132303 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:27.132280569 +0000 UTC m=+38.219694404 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:21:25 crc kubenswrapper[4685]: E0128 12:21:25.132396 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:27.132377721 +0000 UTC m=+38.219791556 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:21:25 crc kubenswrapper[4685]: E0128 12:21:25.132427 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2026-01-28 12:21:27.132418662 +0000 UTC m=+38.219832497 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.503645 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 21:22:50.008211174 +0000 UTC Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.508461 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-bpbjn"] Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.508930 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-bpbjn" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.509236 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-vkpgm"] Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.509979 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-h5wpv"] Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.510415 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.510848 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.513738 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.513889 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.514716 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.515829 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.515881 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.516222 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.516509 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.517044 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.517338 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.517466 4685 reflector.go:368] Caches populated for *v1.ConfigMap 
from object-"openshift-multus"/"kube-root-ca.crt" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.519046 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.519068 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.519458 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.519697 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-rrnv6"] Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.520067 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.522409 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.523290 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.532479 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.535462 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-os-release\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.535532 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-host-var-lib-cni-bin\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.535648 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-host-run-multus-certs\") pod \"multus-rrnv6\" (UID: 
\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.535769 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c708b01f-11f7-4c21-86c4-92ac3c7e9cb1-mcd-auth-proxy-config\") pod \"machine-config-daemon-h5wpv\" (UID: \"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\") " pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.535898 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-system-cni-dir\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.535991 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-host-run-netns\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.536029 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdlnr\" (UniqueName: \"kubernetes.io/projected/c708b01f-11f7-4c21-86c4-92ac3c7e9cb1-kube-api-access-zdlnr\") pod \"machine-config-daemon-h5wpv\" (UID: \"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\") " pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.536096 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-host-var-lib-cni-multus\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.536257 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/6a28aa0f-04d6-471c-95f2-ef2268a29b62-os-release\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.536352 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/6a28aa0f-04d6-471c-95f2-ef2268a29b62-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.536424 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c708b01f-11f7-4c21-86c4-92ac3c7e9cb1-proxy-tls\") pod \"machine-config-daemon-h5wpv\" (UID: \"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\") " pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.536462 4685 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-host-run-k8s-cni-cncf-io\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.536536 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-etc-kubernetes\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.536736 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/28aac5d8-57ac-4302-ab17-c07f33fcaffd-cni-binary-copy\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.536841 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-cnibin\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.536890 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/6a28aa0f-04d6-471c-95f2-ef2268a29b62-cnibin\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.536931 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fsvg6\" (UniqueName: \"kubernetes.io/projected/6a28aa0f-04d6-471c-95f2-ef2268a29b62-kube-api-access-fsvg6\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.536966 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6pqm\" (UniqueName: \"kubernetes.io/projected/31830369-ada9-4ed3-8265-15051d6315f4-kube-api-access-t6pqm\") pod \"node-resolver-bpbjn\" (UID: \"31830369-ada9-4ed3-8265-15051d6315f4\") " pod="openshift-dns/node-resolver-bpbjn" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.537025 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-host-var-lib-kubelet\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.537056 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-hostroot\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.537087 4685 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpxs4\" (UniqueName: \"kubernetes.io/projected/28aac5d8-57ac-4302-ab17-c07f33fcaffd-kube-api-access-zpxs4\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.537122 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6a28aa0f-04d6-471c-95f2-ef2268a29b62-system-cni-dir\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.537154 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/6a28aa0f-04d6-471c-95f2-ef2268a29b62-cni-binary-copy\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.537263 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-multus-cni-dir\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.544741 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.544766 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.544854 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:25 crc kubenswrapper[4685]: E0128 12:21:25.544918 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:21:25 crc kubenswrapper[4685]: E0128 12:21:25.545054 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:21:25 crc kubenswrapper[4685]: E0128 12:21:25.545201 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.548305 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/c708b01f-11f7-4c21-86c4-92ac3c7e9cb1-rootfs\") pod \"machine-config-daemon-h5wpv\" (UID: \"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\") " pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.548373 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-multus-conf-dir\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.548481 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/6a28aa0f-04d6-471c-95f2-ef2268a29b62-tuning-conf-dir\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.548576 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/31830369-ada9-4ed3-8265-15051d6315f4-hosts-file\") pod \"node-resolver-bpbjn\" (UID: \"31830369-ada9-4ed3-8265-15051d6315f4\") " pod="openshift-dns/node-resolver-bpbjn" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.548640 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-multus-socket-dir-parent\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.548679 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/28aac5d8-57ac-4302-ab17-c07f33fcaffd-multus-daemon-config\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.548356 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.562097 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.580970 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.591009 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.599937 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 
28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.612209 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.622735 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.632762 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.643576 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650060 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-cnibin\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650121 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/6a28aa0f-04d6-471c-95f2-ef2268a29b62-cnibin\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650155 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fsvg6\" (UniqueName: \"kubernetes.io/projected/6a28aa0f-04d6-471c-95f2-ef2268a29b62-kube-api-access-fsvg6\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650205 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6pqm\" (UniqueName: \"kubernetes.io/projected/31830369-ada9-4ed3-8265-15051d6315f4-kube-api-access-t6pqm\") pod \"node-resolver-bpbjn\" (UID: 
\"31830369-ada9-4ed3-8265-15051d6315f4\") " pod="openshift-dns/node-resolver-bpbjn" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650228 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-cnibin\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650235 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-host-var-lib-kubelet\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650259 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/6a28aa0f-04d6-471c-95f2-ef2268a29b62-cnibin\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650298 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-hostroot\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650320 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-hostroot\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650286 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-host-var-lib-kubelet\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650384 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpxs4\" (UniqueName: \"kubernetes.io/projected/28aac5d8-57ac-4302-ab17-c07f33fcaffd-kube-api-access-zpxs4\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650417 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6a28aa0f-04d6-471c-95f2-ef2268a29b62-system-cni-dir\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650443 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/6a28aa0f-04d6-471c-95f2-ef2268a29b62-cni-binary-copy\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650472 4685 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-multus-cni-dir\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650492 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6a28aa0f-04d6-471c-95f2-ef2268a29b62-system-cni-dir\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650508 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/c708b01f-11f7-4c21-86c4-92ac3c7e9cb1-rootfs\") pod \"machine-config-daemon-h5wpv\" (UID: \"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\") " pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650532 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-multus-conf-dir\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650565 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/6a28aa0f-04d6-471c-95f2-ef2268a29b62-tuning-conf-dir\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650585 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/31830369-ada9-4ed3-8265-15051d6315f4-hosts-file\") pod \"node-resolver-bpbjn\" (UID: \"31830369-ada9-4ed3-8265-15051d6315f4\") " pod="openshift-dns/node-resolver-bpbjn" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650608 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-multus-socket-dir-parent\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650611 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-multus-conf-dir\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650613 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/c708b01f-11f7-4c21-86c4-92ac3c7e9cb1-rootfs\") pod \"machine-config-daemon-h5wpv\" (UID: \"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\") " pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650634 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: 
\"kubernetes.io/configmap/28aac5d8-57ac-4302-ab17-c07f33fcaffd-multus-daemon-config\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650658 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/31830369-ada9-4ed3-8265-15051d6315f4-hosts-file\") pod \"node-resolver-bpbjn\" (UID: \"31830369-ada9-4ed3-8265-15051d6315f4\") " pod="openshift-dns/node-resolver-bpbjn" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650658 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-os-release\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650741 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-host-var-lib-cni-bin\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650753 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-os-release\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650759 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-host-run-multus-certs\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650751 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-multus-socket-dir-parent\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650781 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c708b01f-11f7-4c21-86c4-92ac3c7e9cb1-mcd-auth-proxy-config\") pod \"machine-config-daemon-h5wpv\" (UID: \"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\") " pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650782 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-host-var-lib-cni-bin\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650797 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-system-cni-dir\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 
12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650814 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-host-run-multus-certs\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650829 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-system-cni-dir\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650872 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-host-run-netns\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650898 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-multus-cni-dir\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650902 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdlnr\" (UniqueName: \"kubernetes.io/projected/c708b01f-11f7-4c21-86c4-92ac3c7e9cb1-kube-api-access-zdlnr\") pod \"machine-config-daemon-h5wpv\" (UID: \"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\") " pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650926 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-host-var-lib-cni-multus\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650947 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/6a28aa0f-04d6-471c-95f2-ef2268a29b62-os-release\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650965 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/6a28aa0f-04d6-471c-95f2-ef2268a29b62-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.650986 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c708b01f-11f7-4c21-86c4-92ac3c7e9cb1-proxy-tls\") pod \"machine-config-daemon-h5wpv\" (UID: \"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\") " pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.651004 4685 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-host-run-k8s-cni-cncf-io\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.651022 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-etc-kubernetes\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.651027 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/6a28aa0f-04d6-471c-95f2-ef2268a29b62-os-release\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.651045 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/28aac5d8-57ac-4302-ab17-c07f33fcaffd-cni-binary-copy\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.651060 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-host-var-lib-cni-multus\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.651107 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-etc-kubernetes\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.651131 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-host-run-k8s-cni-cncf-io\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.651227 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/28aac5d8-57ac-4302-ab17-c07f33fcaffd-host-run-netns\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.656784 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.667615 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.680003 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.692543 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.706306 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.716851 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 
28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.728005 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state
\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.742033 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.753574 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.771253 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.785542 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin 
routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 
127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.843493 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"dccf5cab74b2f06ebccbfce02bf9b6ac053d574719d7ec5b9e36d96376e3678a"} Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.844235 4685 scope.go:117] "RemoveContainer" containerID="54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311" Jan 28 12:21:25 crc kubenswrapper[4685]: E0128 12:21:25.844428 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.907923 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-b85rl"] Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.908784 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.911314 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.912547 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.912642 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.912721 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.912765 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.913375 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.913405 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.927242 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.938962 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.947993 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.954558 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-run-systemd\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.954602 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6fd64f35-81dc-4978-84e8-a746e9a79ccd-env-overrides\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.954629 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-run-openvswitch\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.954655 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-node-log\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.954687 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-systemd-units\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.954709 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: 
\"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-cni-netd\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.954747 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-run-ovn\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.954768 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-862jf\" (UniqueName: \"kubernetes.io/projected/6fd64f35-81dc-4978-84e8-a746e9a79ccd-kube-api-access-862jf\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.954873 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-var-lib-openvswitch\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.954950 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-log-socket\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.954993 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-run-netns\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.955041 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-slash\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.955082 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-kubelet\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.955106 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6fd64f35-81dc-4978-84e8-a746e9a79ccd-ovnkube-script-lib\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.955141 4685 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6fd64f35-81dc-4978-84e8-a746e9a79ccd-ovn-node-metrics-cert\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.955202 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-cni-bin\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.955298 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-etc-openvswitch\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.955369 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-run-ovn-kubernetes\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.955409 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.955444 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6fd64f35-81dc-4978-84e8-a746e9a79ccd-ovnkube-config\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.964562 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-li
b\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\
\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.980736 4685 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:25 crc kubenswrapper[4685]: I0128 12:21:25.993622 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.005699 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.015519 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.023990 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.033629 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.044566 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056253 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056420 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-var-lib-openvswitch\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056456 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"log-socket\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-log-socket\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056509 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-run-netns\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056527 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-slash\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056552 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-kubelet\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056573 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6fd64f35-81dc-4978-84e8-a746e9a79ccd-ovnkube-script-lib\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056569 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-var-lib-openvswitch\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056614 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6fd64f35-81dc-4978-84e8-a746e9a79ccd-ovn-node-metrics-cert\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056641 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-slash\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056649 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-log-socket\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056680 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-kubelet\") pod \"ovnkube-node-b85rl\" 
(UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056699 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-cni-bin\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056641 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-run-netns\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056732 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056750 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-cni-bin\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056762 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6fd64f35-81dc-4978-84e8-a746e9a79ccd-ovnkube-config\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056796 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-etc-openvswitch\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056797 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056819 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-run-ovn-kubernetes\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056846 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-run-systemd\") pod \"ovnkube-node-b85rl\" (UID: 
\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056869 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6fd64f35-81dc-4978-84e8-a746e9a79ccd-env-overrides\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056881 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-run-systemd\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056846 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-etc-openvswitch\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056895 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-run-ovn-kubernetes\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.056923 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-run-openvswitch\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.057013 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-run-openvswitch\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.057068 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-node-log\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.057103 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-systemd-units\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.057118 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-cni-netd\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc 
kubenswrapper[4685]: I0128 12:21:26.057153 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-run-ovn\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.057193 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-862jf\" (UniqueName: \"kubernetes.io/projected/6fd64f35-81dc-4978-84e8-a746e9a79ccd-kube-api-access-862jf\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.057208 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-node-log\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.057218 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-systemd-units\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.057227 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-cni-netd\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.057285 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-run-ovn\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.069725 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.504607 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 16:12:52.000270512 +0000 UTC Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.624862 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/6a28aa0f-04d6-471c-95f2-ef2268a29b62-tuning-conf-dir\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.718551 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6pqm\" (UniqueName: 
\"kubernetes.io/projected/31830369-ada9-4ed3-8265-15051d6315f4-kube-api-access-t6pqm\") pod \"node-resolver-bpbjn\" (UID: \"31830369-ada9-4ed3-8265-15051d6315f4\") " pod="openshift-dns/node-resolver-bpbjn" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.769259 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/6a28aa0f-04d6-471c-95f2-ef2268a29b62-cni-binary-copy\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.769259 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/28aac5d8-57ac-4302-ab17-c07f33fcaffd-multus-daemon-config\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.769405 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c708b01f-11f7-4c21-86c4-92ac3c7e9cb1-mcd-auth-proxy-config\") pod \"machine-config-daemon-h5wpv\" (UID: \"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\") " pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.769617 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/6a28aa0f-04d6-471c-95f2-ef2268a29b62-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.769923 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6fd64f35-81dc-4978-84e8-a746e9a79ccd-env-overrides\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.770572 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6fd64f35-81dc-4978-84e8-a746e9a79ccd-ovnkube-script-lib\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.770791 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/28aac5d8-57ac-4302-ab17-c07f33fcaffd-cni-binary-copy\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.771104 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6fd64f35-81dc-4978-84e8-a746e9a79ccd-ovnkube-config\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.773213 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fsvg6\" (UniqueName: 
\"kubernetes.io/projected/6a28aa0f-04d6-471c-95f2-ef2268a29b62-kube-api-access-fsvg6\") pod \"multus-additional-cni-plugins-vkpgm\" (UID: \"6a28aa0f-04d6-471c-95f2-ef2268a29b62\") " pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.773732 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c708b01f-11f7-4c21-86c4-92ac3c7e9cb1-proxy-tls\") pod \"machine-config-daemon-h5wpv\" (UID: \"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\") " pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.774277 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6fd64f35-81dc-4978-84e8-a746e9a79ccd-ovn-node-metrics-cert\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.775974 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdlnr\" (UniqueName: \"kubernetes.io/projected/c708b01f-11f7-4c21-86c4-92ac3c7e9cb1-kube-api-access-zdlnr\") pod \"machine-config-daemon-h5wpv\" (UID: \"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\") " pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.776265 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-862jf\" (UniqueName: \"kubernetes.io/projected/6fd64f35-81dc-4978-84e8-a746e9a79ccd-kube-api-access-862jf\") pod \"ovnkube-node-b85rl\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.776498 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zpxs4\" (UniqueName: \"kubernetes.io/projected/28aac5d8-57ac-4302-ab17-c07f33fcaffd-kube-api-access-zpxs4\") pod \"multus-rrnv6\" (UID: \"28aac5d8-57ac-4302-ab17-c07f33fcaffd\") " pod="openshift-multus/multus-rrnv6" Jan 28 12:21:26 crc kubenswrapper[4685]: I0128 12:21:26.846504 4685 scope.go:117] "RemoveContainer" containerID="54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311" Jan 28 12:21:26 crc kubenswrapper[4685]: E0128 12:21:26.846705 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 28 12:21:26 crc kubenswrapper[4685]: W0128 12:21:26.887596 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-ab448dc31c77ae60a7abe1d2d287dcc47130ce580fd3ffa3b369b43e43a724cf WatchSource:0}: Error finding container ab448dc31c77ae60a7abe1d2d287dcc47130ce580fd3ffa3b369b43e43a724cf: Status 404 returned error can't find the container with id ab448dc31c77ae60a7abe1d2d287dcc47130ce580fd3ffa3b369b43e43a724cf Jan 28 12:21:27 crc kubenswrapper[4685]: I0128 12:21:27.014458 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-bpbjn" Jan 28 12:21:27 crc kubenswrapper[4685]: I0128 12:21:27.068444 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-rrnv6" Jan 28 12:21:27 crc kubenswrapper[4685]: I0128 12:21:27.068523 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:21:27 crc kubenswrapper[4685]: I0128 12:21:27.068741 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:27 crc kubenswrapper[4685]: I0128 12:21:27.068905 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" Jan 28 12:21:27 crc kubenswrapper[4685]: I0128 12:21:27.170912 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:21:27 crc kubenswrapper[4685]: I0128 12:21:27.171065 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:27 crc kubenswrapper[4685]: I0128 12:21:27.171121 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:27 crc kubenswrapper[4685]: I0128 12:21:27.171505 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:27 crc kubenswrapper[4685]: I0128 12:21:27.171574 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:27 crc kubenswrapper[4685]: E0128 12:21:27.171809 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:21:27 crc kubenswrapper[4685]: E0128 12:21:27.171837 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:21:27 crc kubenswrapper[4685]: E0128 12:21:27.171852 4685 projected.go:194] Error preparing 
Jan 28 12:21:27 crc kubenswrapper[4685]: E0128 12:21:27.171852 4685 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 12:21:27 crc kubenswrapper[4685]: E0128 12:21:27.171933 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:31.17191445 +0000 UTC m=+42.259328285 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 12:21:27 crc kubenswrapper[4685]: E0128 12:21:27.172073 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:21:31.172063054 +0000 UTC m=+42.259476889 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:21:27 crc kubenswrapper[4685]: E0128 12:21:27.172155 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 28 12:21:27 crc kubenswrapper[4685]: E0128 12:21:27.172206 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 28 12:21:27 crc kubenswrapper[4685]: E0128 12:21:27.172218 4685 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
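
The "durationBeforeRetry 4s" above is per-volume exponential backoff from kubelet's nestedpendingoperations. A sketch of that schedule, assuming the upstream ~500ms initial delay and ~2m2s cap (both constants are assumptions; 0.5s doubled three times gives the 4s visible here):

package main

import (
	"fmt"
	"time"
)

// Prints the doubling retry delays for a repeatedly failing volume operation.
func main() {
	delay := 500 * time.Millisecond           // assumed initialDurationBeforeRetry
	maxDelay := 2*time.Minute + 2*time.Second // assumed maxDurationBeforeRetry
	for attempt := 1; attempt <= 10; attempt++ {
		fmt.Printf("attempt %d: durationBeforeRetry %s\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}
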
Jan 28 12:21:27 crc kubenswrapper[4685]: E0128 12:21:27.172244 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:31.172236608 +0000 UTC m=+42.259650443 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 12:21:27 crc kubenswrapper[4685]: E0128 12:21:27.172317 4685 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 28 12:21:27 crc kubenswrapper[4685]: E0128 12:21:27.172371 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:31.172360731 +0000 UTC m=+42.259774566 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 28 12:21:27 crc kubenswrapper[4685]: E0128 12:21:27.172462 4685 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 28 12:21:27 crc kubenswrapper[4685]: E0128 12:21:27.172493 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:31.172485175 +0000 UTC m=+42.259899010 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 28 12:21:27 crc kubenswrapper[4685]: I0128 12:21:27.505165 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 22:13:14.938632915 +0000 UTC
Jan 28 12:21:27 crc kubenswrapper[4685]: I0128 12:21:27.545690 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:21:27 crc kubenswrapper[4685]: I0128 12:21:27.545695 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:21:27 crc kubenswrapper[4685]: I0128 12:21:27.545719 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
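
The certificate_manager entries in this log report the same expiration each second but a different rotation deadline every time: client-go's certificate manager re-draws a random deadline inside the certificate's validity window on each check. A sketch of that jitter, assuming the commonly cited 70-90% band and a NotBefore one year before the logged expiry (both assumptions for illustration):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// NotBefore is assumed; NotAfter is the expiry shown in the log.
	notBefore := time.Date(2025, time.February, 24, 5, 53, 3, 0, time.UTC)
	notAfter := time.Date(2026, time.February, 24, 5, 53, 3, 0, time.UTC)
	lifetime := notAfter.Sub(notBefore)
	for i := 0; i < 3; i++ {
		fraction := 0.7 + 0.2*rand.Float64() // assumed 70-90% jitter band
		deadline := notBefore.Add(time.Duration(fraction * float64(lifetime)))
		fmt.Printf("expiration %s, rotation deadline %s\n",
			notAfter.Format(time.RFC3339), deadline.Format(time.RFC3339))
	}
}

Each run prints deadlines scattered between roughly November 2025 and mid-January 2026, matching the spread of deadlines visible in the surrounding entries.
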
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:21:27 crc kubenswrapper[4685]: E0128 12:21:27.545959 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:21:27 crc kubenswrapper[4685]: E0128 12:21:27.546100 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:21:27 crc kubenswrapper[4685]: I0128 12:21:27.850051 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-bpbjn" event={"ID":"31830369-ada9-4ed3-8265-15051d6315f4","Type":"ContainerStarted","Data":"e03f60bf094ccbc3d5635579c2708248d682647ccdeca9e7b4355ea3a325214f"} Jan 28 12:21:27 crc kubenswrapper[4685]: I0128 12:21:27.851327 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rrnv6" event={"ID":"28aac5d8-57ac-4302-ab17-c07f33fcaffd","Type":"ContainerStarted","Data":"7ded4f84b51eacb3b7b93675e85a189a2dea321475d4219796ea7f778cc26e7a"} Jan 28 12:21:27 crc kubenswrapper[4685]: I0128 12:21:27.852639 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerStarted","Data":"08cf184a0195cda1de29e3a1d361189133ca8c6a64f916f37b2cafecfe2c6852"} Jan 28 12:21:27 crc kubenswrapper[4685]: I0128 12:21:27.854243 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" event={"ID":"6a28aa0f-04d6-471c-95f2-ef2268a29b62","Type":"ContainerStarted","Data":"0ed60c0dda53f7ad0b9af10a63b62cf2431a22725570ab073ac195f924e1b9e9"} Jan 28 12:21:27 crc kubenswrapper[4685]: I0128 12:21:27.855301 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerStarted","Data":"e8231f94f6b476f75119ac166493972c5a3edadd62d1c4c7740fc834cefb1ee8"} Jan 28 12:21:27 crc kubenswrapper[4685]: I0128 12:21:27.856359 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"ab448dc31c77ae60a7abe1d2d287dcc47130ce580fd3ffa3b369b43e43a724cf"} Jan 28 12:21:28 crc kubenswrapper[4685]: I0128 12:21:28.505577 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 06:56:32.394164388 +0000 UTC Jan 28 12:21:28 crc kubenswrapper[4685]: I0128 12:21:28.862398 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" 
event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912"} Jan 28 12:21:28 crc kubenswrapper[4685]: I0128 12:21:28.864460 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.506690 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 05:41:49.149866268 +0000 UTC Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.545084 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.545095 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:29 crc kubenswrapper[4685]: E0128 12:21:29.545311 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.545102 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:29 crc kubenswrapper[4685]: E0128 12:21:29.545391 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:21:29 crc kubenswrapper[4685]: E0128 12:21:29.545454 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.717635 4685 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.720048 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.720114 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.720132 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.720341 4685 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.730165 4685 kubelet_node_status.go:115] "Node was previously registered" node="crc" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.730270 4685 kubelet_node_status.go:79] "Successfully registered node" node="crc" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.733605 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.734100 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.734146 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.734191 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.734209 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:29Z","lastTransitionTime":"2026-01-28T12:21:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:29 crc kubenswrapper[4685]: E0128 12:21:29.758297 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.762904 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.762960 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.762978 4685 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.763002 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.763019 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:29Z","lastTransitionTime":"2026-01-28T12:21:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:29 crc kubenswrapper[4685]: E0128 12:21:29.778653 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.782733 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.782773 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.782784 4685 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.782800 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.782810 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:29Z","lastTransitionTime":"2026-01-28T12:21:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:29 crc kubenswrapper[4685]: E0128 12:21:29.799290 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.804892 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.804946 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.804962 4685 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.804986 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.805002 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:29Z","lastTransitionTime":"2026-01-28T12:21:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:29 crc kubenswrapper[4685]: E0128 12:21:29.821726 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.828021 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.828150 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.828304 4685 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.828393 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.828489 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:29Z","lastTransitionTime":"2026-01-28T12:21:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:29 crc kubenswrapper[4685]: E0128 12:21:29.844699 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:29 crc kubenswrapper[4685]: E0128 12:21:29.844838 4685 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.846617 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.846682 4685 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.846694 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.846715 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.846727 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:29Z","lastTransitionTime":"2026-01-28T12:21:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.871903 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6"} Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.873475 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" event={"ID":"6a28aa0f-04d6-471c-95f2-ef2268a29b62","Type":"ContainerStarted","Data":"f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80"} Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.874847 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerStarted","Data":"62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c"} Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.949977 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.950009 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.950020 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.950036 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.950047 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:29Z","lastTransitionTime":"2026-01-28T12:21:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.953964 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-ml94r"] Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.954437 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-ml94r" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.956884 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.957432 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.958050 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.958132 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.968859 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\
\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.981302 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:29 crc kubenswrapper[4685]: I0128 12:21:29.992395 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.002277 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.006049 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/11d63fa9-8fe9-435e-87ce-e804aadd7def-serviceca\") pod \"node-ca-ml94r\" (UID: \"11d63fa9-8fe9-435e-87ce-e804aadd7def\") " pod="openshift-image-registry/node-ca-ml94r" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.006095 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86vvx\" (UniqueName: \"kubernetes.io/projected/11d63fa9-8fe9-435e-87ce-e804aadd7def-kube-api-access-86vvx\") pod \"node-ca-ml94r\" (UID: \"11d63fa9-8fe9-435e-87ce-e804aadd7def\") " pod="openshift-image-registry/node-ca-ml94r" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.006122 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/11d63fa9-8fe9-435e-87ce-e804aadd7def-host\") pod \"node-ca-ml94r\" (UID: \"11d63fa9-8fe9-435e-87ce-e804aadd7def\") " pod="openshift-image-registry/node-ca-ml94r" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.012527 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.028035 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.039463 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.053065 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.053095 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.053105 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.053118 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.053128 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:30Z","lastTransitionTime":"2026-01-28T12:21:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.053844 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.065490 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.074919 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.087131 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.099364 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin 
routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 
127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.107094 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/11d63fa9-8fe9-435e-87ce-e804aadd7def-serviceca\") pod \"node-ca-ml94r\" (UID: \"11d63fa9-8fe9-435e-87ce-e804aadd7def\") " pod="openshift-image-registry/node-ca-ml94r" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.107162 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86vvx\" (UniqueName: \"kubernetes.io/projected/11d63fa9-8fe9-435e-87ce-e804aadd7def-kube-api-access-86vvx\") pod \"node-ca-ml94r\" (UID: \"11d63fa9-8fe9-435e-87ce-e804aadd7def\") " pod="openshift-image-registry/node-ca-ml94r" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.107219 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/11d63fa9-8fe9-435e-87ce-e804aadd7def-host\") pod \"node-ca-ml94r\" (UID: \"11d63fa9-8fe9-435e-87ce-e804aadd7def\") " pod="openshift-image-registry/node-ca-ml94r" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.107338 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/11d63fa9-8fe9-435e-87ce-e804aadd7def-host\") pod \"node-ca-ml94r\" (UID: \"11d63fa9-8fe9-435e-87ce-e804aadd7def\") " pod="openshift-image-registry/node-ca-ml94r" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.108204 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/11d63fa9-8fe9-435e-87ce-e804aadd7def-serviceca\") pod \"node-ca-ml94r\" (UID: \"11d63fa9-8fe9-435e-87ce-e804aadd7def\") " pod="openshift-image-registry/node-ca-ml94r" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.109725 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.119690 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.130067 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86vvx\" (UniqueName: \"kubernetes.io/projected/11d63fa9-8fe9-435e-87ce-e804aadd7def-kube-api-access-86vvx\") pod \"node-ca-ml94r\" (UID: \"11d63fa9-8fe9-435e-87ce-e804aadd7def\") " pod="openshift-image-registry/node-ca-ml94r" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.156076 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.156118 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.156127 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.156144 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.156155 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:30Z","lastTransitionTime":"2026-01-28T12:21:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.246551 4685 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 28 12:21:30 crc kubenswrapper[4685]: W0128 12:21:30.246823 4685 reflector.go:484] object-"openshift-image-registry"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-image-registry"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 28 12:21:30 crc kubenswrapper[4685]: W0128 12:21:30.246857 4685 reflector.go:484] object-"openshift-image-registry"/"node-ca-dockercfg-4777p": watch of *v1.Secret ended with: very short watch: object-"openshift-image-registry"/"node-ca-dockercfg-4777p": Unexpected watch close - watch lasted less than a second and no items received Jan 28 12:21:30 crc kubenswrapper[4685]: W0128 12:21:30.246883 4685 reflector.go:484] object-"openshift-image-registry"/"image-registry-certificates": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-image-registry"/"image-registry-certificates": Unexpected watch close - watch lasted less than a second and no items received Jan 28 12:21:30 crc kubenswrapper[4685]: W0128 12:21:30.247631 4685 reflector.go:484] object-"openshift-image-registry"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-image-registry"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.267653 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.268052 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.268190 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.268397 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.268524 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:30Z","lastTransitionTime":"2026-01-28T12:21:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.272457 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-ml94r" Jan 28 12:21:30 crc kubenswrapper[4685]: W0128 12:21:30.286964 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod11d63fa9_8fe9_435e_87ce_e804aadd7def.slice/crio-422230fd5cc9ee29b03f1b5d2d2a7900aa288d78157421206c375d2a3fe009e1 WatchSource:0}: Error finding container 422230fd5cc9ee29b03f1b5d2d2a7900aa288d78157421206c375d2a3fe009e1: Status 404 returned error can't find the container with id 422230fd5cc9ee29b03f1b5d2d2a7900aa288d78157421206c375d2a3fe009e1 Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.371283 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.371324 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.371335 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.371351 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.371362 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:30Z","lastTransitionTime":"2026-01-28T12:21:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.479562 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.479611 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.479622 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.479641 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.479654 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:30Z","lastTransitionTime":"2026-01-28T12:21:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.518861 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 16:25:21.238366263 +0000 UTC Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.559209 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\
":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.571015 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.581394 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.582404 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.582447 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.582457 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.582475 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.582488 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:30Z","lastTransitionTime":"2026-01-28T12:21:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.591228 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.608957 4685 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\
\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b1
7b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.618524 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 
12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.632639 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.643844 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.658624 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.668705 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.682599 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.685427 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.685460 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.685469 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.685483 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.685492 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:30Z","lastTransitionTime":"2026-01-28T12:21:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.692636 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.708772 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.719917 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.788132 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.788209 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.788220 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.788237 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.788250 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:30Z","lastTransitionTime":"2026-01-28T12:21:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.882033 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rrnv6" event={"ID":"28aac5d8-57ac-4302-ab17-c07f33fcaffd","Type":"ContainerStarted","Data":"b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8"} Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.884680 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerStarted","Data":"022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9"} Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.887206 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-bpbjn" event={"ID":"31830369-ada9-4ed3-8265-15051d6315f4","Type":"ContainerStarted","Data":"c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92"} Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.889612 4685 generic.go:334] "Generic (PLEG): container finished" podID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerID="62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c" exitCode=0 Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.889728 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerDied","Data":"62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c"} Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.891443 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.891499 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.891525 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.891557 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.891581 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:30Z","lastTransitionTime":"2026-01-28T12:21:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.893026 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-ml94r" event={"ID":"11d63fa9-8fe9-435e-87ce-e804aadd7def","Type":"ContainerStarted","Data":"422230fd5cc9ee29b03f1b5d2d2a7900aa288d78157421206c375d2a3fe009e1"} Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.913224 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.931270 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.946282 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.959871 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with 
unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.986026 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node 
kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\
\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\
",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.994186 4685 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.994275 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.994294 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.994321 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.994334 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:30Z","lastTransitionTime":"2026-01-28T12:21:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:30 crc kubenswrapper[4685]: I0128 12:21:30.999215 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.017185 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f89
45c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.051689 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.065255 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 
12:21:31.076077 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.088561 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.096465 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.096526 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.096540 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.096564 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.096583 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:31Z","lastTransitionTime":"2026-01-28T12:21:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.098149 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.109182 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.121016 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.152697 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.199318 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.199394 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.199413 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.199443 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.199464 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:31Z","lastTransitionTime":"2026-01-28T12:21:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.226657 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.226808 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 12:21:31 crc kubenswrapper[4685]: E0128 12:21:31.226860 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:21:39.226816768 +0000 UTC m=+50.314230603 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.226921 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.226974 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.227009 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:21:31 crc kubenswrapper[4685]: E0128 12:21:31.227007 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 28 12:21:31 crc kubenswrapper[4685]: E0128 12:21:31.227078 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 28 12:21:31 crc kubenswrapper[4685]: E0128 12:21:31.227091 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 28 12:21:31 crc kubenswrapper[4685]: E0128 12:21:31.227101 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 28 12:21:31 crc kubenswrapper[4685]: E0128 12:21:31.227122 4685 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 12:21:31 crc kubenswrapper[4685]: E0128 12:21:31.227158 4685 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 28 12:21:31 crc kubenswrapper[4685]: E0128 12:21:31.227238 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:39.227212118 +0000 UTC m=+50.314625993 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 12:21:31 crc kubenswrapper[4685]: E0128 12:21:31.227267 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:39.227254969 +0000 UTC m=+50.314668844 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 28 12:21:31 crc kubenswrapper[4685]: E0128 12:21:31.227106 4685 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 12:21:31 crc kubenswrapper[4685]: E0128 12:21:31.227286 4685 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 28 12:21:31 crc kubenswrapper[4685]: E0128 12:21:31.227314 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:39.22730639 +0000 UTC m=+50.314720225 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 28 12:21:31 crc kubenswrapper[4685]: E0128 12:21:31.227334 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:39.227319611 +0000 UTC m=+50.314733486 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.302701 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.302752 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.302763 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.302781 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.302792 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:31Z","lastTransitionTime":"2026-01-28T12:21:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.359315 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.405310 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.405563 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.405642 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.405728 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.405807 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:31Z","lastTransitionTime":"2026-01-28T12:21:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.477416 4685 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.508618 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.508657 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.508667 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.508694 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.508725 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:31Z","lastTransitionTime":"2026-01-28T12:21:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.519421 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 20:42:34.036567377 +0000 UTC
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.544944 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.545041 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:21:31 crc kubenswrapper[4685]: E0128 12:21:31.545075 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.544944 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 12:21:31 crc kubenswrapper[4685]: E0128 12:21:31.545129 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 12:21:31 crc kubenswrapper[4685]: E0128 12:21:31.545395 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.612461 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.612534 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.612557 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.612586 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.612610 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:31Z","lastTransitionTime":"2026-01-28T12:21:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.659891 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.667015 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.715775 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.715822 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.715861 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.715880 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.715889 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:31Z","lastTransitionTime":"2026-01-28T12:21:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.819651 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.819991 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.820006 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.820027 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.820040 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:31Z","lastTransitionTime":"2026-01-28T12:21:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.898499 4685 generic.go:334] "Generic (PLEG): container finished" podID="6a28aa0f-04d6-471c-95f2-ef2268a29b62" containerID="f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80" exitCode=0
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.899379 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" event={"ID":"6a28aa0f-04d6-471c-95f2-ef2268a29b62","Type":"ContainerDied","Data":"f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80"}
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.901107 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99"}
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.903340 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-ml94r" event={"ID":"11d63fa9-8fe9-435e-87ce-e804aadd7def","Type":"ContainerStarted","Data":"0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac"}
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.923227 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.923270 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.923283 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.923298 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.923309 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:31Z","lastTransitionTime":"2026-01-28T12:21:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.932620 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.944319 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.966135 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.980044 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 12:21:31 crc kubenswrapper[4685]: I0128 12:21:31.992553 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.007520 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.022449 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 
12:21:32.026482 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.026530 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.026540 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.026556 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.026566 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:32Z","lastTransitionTime":"2026-01-28T12:21:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.034256 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.047894 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.059989 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 
28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.072967 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.085753 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.100209 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.114475 4685 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.125790 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.129999 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.131455 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.131496 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.131523 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.131537 4685 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:32Z","lastTransitionTime":"2026-01-28T12:21:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.136388 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.147639 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.157019 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.165008 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection 
refused" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.205185 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiti
ng\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnl
y\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.213250 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.225538 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.234501 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.234567 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.234584 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.234610 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.234628 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:32Z","lastTransitionTime":"2026-01-28T12:21:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.238573 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":
\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.250872 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.261283 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.272218 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.284387 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.296822 4685 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.337322 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.337373 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.337383 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.337400 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.337411 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:32Z","lastTransitionTime":"2026-01-28T12:21:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.440071 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.440111 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.440121 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.440135 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.440144 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:32Z","lastTransitionTime":"2026-01-28T12:21:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.520092 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 11:02:11.907398333 +0000 UTC Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.542380 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.542432 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.542443 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.542467 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.542480 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:32Z","lastTransitionTime":"2026-01-28T12:21:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.645365 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.645410 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.645422 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.645441 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.645455 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:32Z","lastTransitionTime":"2026-01-28T12:21:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.748261 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.748352 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.748372 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.748402 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.748421 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:32Z","lastTransitionTime":"2026-01-28T12:21:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.851881 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.851933 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.851955 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.851982 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.852002 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:32Z","lastTransitionTime":"2026-01-28T12:21:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.909771 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" event={"ID":"6a28aa0f-04d6-471c-95f2-ef2268a29b62","Type":"ContainerStarted","Data":"0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500"} Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.917410 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03"} Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.921231 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerStarted","Data":"d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b"} Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.923361 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerStarted","Data":"750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c"} Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.929329 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.942020 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.953512 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.955630 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.955669 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.955680 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.955697 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.955706 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:32Z","lastTransitionTime":"2026-01-28T12:21:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.966044 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:32 crc kubenswrapper[4685]: 
I0128 12:21:32.983979 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\
\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.
io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:32 crc kubenswrapper[4685]: I0128 12:21:32.994904 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.009201 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.021412 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.036394 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.049736 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.058006 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.058045 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.058055 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.058072 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.058083 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:33Z","lastTransitionTime":"2026-01-28T12:21:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.061953 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.074280 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.087803 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.105369 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.121701 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.152513 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.160540 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.160596 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.160609 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.160627 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.160640 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:33Z","lastTransitionTime":"2026-01-28T12:21:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.166195 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.194559 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.207980 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\
\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.227356 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\
"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.244563 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.262081 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.262963 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.263011 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.263028 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.263051 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.263070 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:33Z","lastTransitionTime":"2026-01-28T12:21:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.280778 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa4
1ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.300596 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.319305 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.333619 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.352690 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.365913 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.366191 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.366299 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.366402 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.366489 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:33Z","lastTransitionTime":"2026-01-28T12:21:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.370039 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\
\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:33Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.469150 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.469693 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.469923 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.470099 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.470339 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:33Z","lastTransitionTime":"2026-01-28T12:21:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.520453 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 15:28:31.378157104 +0000 UTC Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.545258 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:33 crc kubenswrapper[4685]: E0128 12:21:33.545695 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.545560 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:33 crc kubenswrapper[4685]: E0128 12:21:33.546020 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.545523 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:33 crc kubenswrapper[4685]: E0128 12:21:33.546308 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.573192 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.573259 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.573271 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.573288 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.573301 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:33Z","lastTransitionTime":"2026-01-28T12:21:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.676926 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.676957 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.676967 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.676982 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.676990 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:33Z","lastTransitionTime":"2026-01-28T12:21:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.779843 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.779900 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.779918 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.779946 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.779965 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:33Z","lastTransitionTime":"2026-01-28T12:21:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.882731 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.882831 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.882852 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.882877 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.882894 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:33Z","lastTransitionTime":"2026-01-28T12:21:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.929816 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerStarted","Data":"720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88"} Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.985775 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.985837 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.985859 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.985887 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:33 crc kubenswrapper[4685]: I0128 12:21:33.985908 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:33Z","lastTransitionTime":"2026-01-28T12:21:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.088493 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.088806 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.088933 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.089059 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.089198 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:34Z","lastTransitionTime":"2026-01-28T12:21:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.191990 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.192049 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.192066 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.192088 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.192103 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:34Z","lastTransitionTime":"2026-01-28T12:21:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.296788 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.296835 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.296851 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.296875 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.296893 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:34Z","lastTransitionTime":"2026-01-28T12:21:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.399769 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.399828 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.399848 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.399871 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.399889 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:34Z","lastTransitionTime":"2026-01-28T12:21:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.503199 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.503254 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.503269 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.503302 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.503315 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:34Z","lastTransitionTime":"2026-01-28T12:21:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.521558 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 23:06:47.396613918 +0000 UTC Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.606508 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.606582 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.606629 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.606654 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.606668 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:34Z","lastTransitionTime":"2026-01-28T12:21:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.709370 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.709422 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.709459 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.709480 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.709495 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:34Z","lastTransitionTime":"2026-01-28T12:21:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.812720 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.813386 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.813529 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.813646 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.813770 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:34Z","lastTransitionTime":"2026-01-28T12:21:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.920612 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.920654 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.920666 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.920682 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.920695 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:34Z","lastTransitionTime":"2026-01-28T12:21:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.935496 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerStarted","Data":"bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183"} Jan 28 12:21:34 crc kubenswrapper[4685]: I0128 12:21:34.935554 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerStarted","Data":"980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e"} Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.022580 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.022620 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.022631 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.022647 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.022659 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:35Z","lastTransitionTime":"2026-01-28T12:21:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.125089 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.125127 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.125138 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.125154 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.125165 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:35Z","lastTransitionTime":"2026-01-28T12:21:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.227653 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.227688 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.227697 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.227711 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.227721 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:35Z","lastTransitionTime":"2026-01-28T12:21:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.330254 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.330294 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.330305 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.330318 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.330328 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:35Z","lastTransitionTime":"2026-01-28T12:21:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.433565 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.433933 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.433950 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.433975 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.433993 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:35Z","lastTransitionTime":"2026-01-28T12:21:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.521958 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 03:34:23.841245465 +0000 UTC Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.537091 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.537155 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.537216 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.537249 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.537272 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:35Z","lastTransitionTime":"2026-01-28T12:21:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.545163 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.545239 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:35 crc kubenswrapper[4685]: E0128 12:21:35.545334 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.545476 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:35 crc kubenswrapper[4685]: E0128 12:21:35.545569 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:21:35 crc kubenswrapper[4685]: E0128 12:21:35.545805 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.640021 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.640102 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.640131 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.640206 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.640234 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:35Z","lastTransitionTime":"2026-01-28T12:21:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.743633 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.743677 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.743690 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.743707 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.743720 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:35Z","lastTransitionTime":"2026-01-28T12:21:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.847978 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.848091 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.848118 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.848151 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.848206 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:35Z","lastTransitionTime":"2026-01-28T12:21:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.951149 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.951217 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.951229 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.951250 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:35 crc kubenswrapper[4685]: I0128 12:21:35.951266 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:35Z","lastTransitionTime":"2026-01-28T12:21:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.054013 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.054060 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.054084 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.054102 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.054112 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:36Z","lastTransitionTime":"2026-01-28T12:21:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.156618 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.156660 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.156671 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.156687 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.156698 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:36Z","lastTransitionTime":"2026-01-28T12:21:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.260367 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.260418 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.260430 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.260455 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.260469 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:36Z","lastTransitionTime":"2026-01-28T12:21:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.364129 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.364253 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.364278 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.364314 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.364339 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:36Z","lastTransitionTime":"2026-01-28T12:21:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.421474 4685 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.467282 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.467341 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.467358 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.467376 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.467390 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:36Z","lastTransitionTime":"2026-01-28T12:21:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.522469 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 11:12:59.995988191 +0000 UTC Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.570880 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.570948 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.570971 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.571000 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.571023 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:36Z","lastTransitionTime":"2026-01-28T12:21:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.674381 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.674430 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.674449 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.674476 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.674496 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:36Z","lastTransitionTime":"2026-01-28T12:21:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.777124 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.777211 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.777229 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.777256 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.777274 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:36Z","lastTransitionTime":"2026-01-28T12:21:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.880919 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.880987 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.881010 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.881041 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.881060 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:36Z","lastTransitionTime":"2026-01-28T12:21:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.945633 4685 generic.go:334] "Generic (PLEG): container finished" podID="6a28aa0f-04d6-471c-95f2-ef2268a29b62" containerID="0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500" exitCode=0 Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.945665 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" event={"ID":"6a28aa0f-04d6-471c-95f2-ef2268a29b62","Type":"ContainerDied","Data":"0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500"} Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.963617 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:36Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.984455 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:36Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.984799 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.984838 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.984873 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.984889 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:36 crc kubenswrapper[4685]: I0128 12:21:36.984901 4685 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:36Z","lastTransitionTime":"2026-01-28T12:21:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.001257 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:36Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.022863 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:37Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.034900 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\
\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:37Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.052525 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\
"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:37Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.069479 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:37Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.086113 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:37Z is after 2025-08-24T17:21:41Z"
Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.088709 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.088768 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.088778 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.088797 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.088807 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:37Z","lastTransitionTime":"2026-01-28T12:21:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.102395 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:37Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.115791 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:37Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.131265 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-
28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:37Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.147833 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\
"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:37Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.161846 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:37Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.174052 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:37Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.191487 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.191543 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.191555 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.191574 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.191584 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:37Z","lastTransitionTime":"2026-01-28T12:21:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.294618 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.294659 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.294669 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.294687 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.294698 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:37Z","lastTransitionTime":"2026-01-28T12:21:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.396722 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.396782 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.396801 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.396829 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.396846 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:37Z","lastTransitionTime":"2026-01-28T12:21:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.500625 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.500860 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.501003 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.501154 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.501277 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:37Z","lastTransitionTime":"2026-01-28T12:21:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.522940 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 21:15:34.755462726 +0000 UTC Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.545726 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.545780 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.545736 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:37 crc kubenswrapper[4685]: E0128 12:21:37.545896 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:21:37 crc kubenswrapper[4685]: E0128 12:21:37.546252 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:21:37 crc kubenswrapper[4685]: E0128 12:21:37.546368 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.546901 4685 scope.go:117] "RemoveContainer" containerID="54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.603888 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.604137 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.604146 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.604160 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.604184 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:37Z","lastTransitionTime":"2026-01-28T12:21:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.706424 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.706477 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.706493 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.706511 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.706522 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:37Z","lastTransitionTime":"2026-01-28T12:21:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.809288 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.809334 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.809350 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.809374 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.809389 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:37Z","lastTransitionTime":"2026-01-28T12:21:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.912425 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.912500 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.912518 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.912546 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.912563 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:37Z","lastTransitionTime":"2026-01-28T12:21:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:37 crc kubenswrapper[4685]: I0128 12:21:37.954635 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerStarted","Data":"ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7"} Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.015847 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.015902 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.015915 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.015939 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.015954 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:38Z","lastTransitionTime":"2026-01-28T12:21:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.119624 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.119961 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.120100 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.120289 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.120452 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:38Z","lastTransitionTime":"2026-01-28T12:21:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.126587 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf"] Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.127613 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.129632 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.130977 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.185067 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:38Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.198592 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/94fa291c-6b2a-4a3b-b70d-def6dd28589b-env-overrides\") pod \"ovnkube-control-plane-749d76644c-m96cf\" (UID: \"94fa291c-6b2a-4a3b-b70d-def6dd28589b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.198671 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/94fa291c-6b2a-4a3b-b70d-def6dd28589b-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-m96cf\" (UID: \"94fa291c-6b2a-4a3b-b70d-def6dd28589b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.198703 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/94fa291c-6b2a-4a3b-b70d-def6dd28589b-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-m96cf\" (UID: \"94fa291c-6b2a-4a3b-b70d-def6dd28589b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.198763 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pk4p2\" (UniqueName: \"kubernetes.io/projected/94fa291c-6b2a-4a3b-b70d-def6dd28589b-kube-api-access-pk4p2\") pod \"ovnkube-control-plane-749d76644c-m96cf\" (UID: \"94fa291c-6b2a-4a3b-b70d-def6dd28589b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.200526 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:38Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.212389 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:38Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.222795 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.222837 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.222850 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.222869 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.222884 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:38Z","lastTransitionTime":"2026-01-28T12:21:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.226223 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:38Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.244918 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-
28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:38Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.258156 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\"
:\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:38Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.282692 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:38Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.295476 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:38Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.299734 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pk4p2\" (UniqueName: \"kubernetes.io/projected/94fa291c-6b2a-4a3b-b70d-def6dd28589b-kube-api-access-pk4p2\") pod \"ovnkube-control-plane-749d76644c-m96cf\" (UID: \"94fa291c-6b2a-4a3b-b70d-def6dd28589b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.299806 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/94fa291c-6b2a-4a3b-b70d-def6dd28589b-env-overrides\") pod \"ovnkube-control-plane-749d76644c-m96cf\" (UID: \"94fa291c-6b2a-4a3b-b70d-def6dd28589b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 
12:21:38.299860 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/94fa291c-6b2a-4a3b-b70d-def6dd28589b-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-m96cf\" (UID: \"94fa291c-6b2a-4a3b-b70d-def6dd28589b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.299887 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/94fa291c-6b2a-4a3b-b70d-def6dd28589b-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-m96cf\" (UID: \"94fa291c-6b2a-4a3b-b70d-def6dd28589b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.301440 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/94fa291c-6b2a-4a3b-b70d-def6dd28589b-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-m96cf\" (UID: \"94fa291c-6b2a-4a3b-b70d-def6dd28589b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.301459 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/94fa291c-6b2a-4a3b-b70d-def6dd28589b-env-overrides\") pod \"ovnkube-control-plane-749d76644c-m96cf\" (UID: \"94fa291c-6b2a-4a3b-b70d-def6dd28589b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.309588 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:38Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.314623 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/94fa291c-6b2a-4a3b-b70d-def6dd28589b-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-m96cf\" (UID: \"94fa291c-6b2a-4a3b-b70d-def6dd28589b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.325695 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:38Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.327283 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pk4p2\" (UniqueName: \"kubernetes.io/projected/94fa291c-6b2a-4a3b-b70d-def6dd28589b-kube-api-access-pk4p2\") pod \"ovnkube-control-plane-749d76644c-m96cf\" (UID: \"94fa291c-6b2a-4a3b-b70d-def6dd28589b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 
12:21:38.327442 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.327495 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.327521 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.327551 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.327575 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:38Z","lastTransitionTime":"2026-01-28T12:21:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.354933 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:38Z is after 2025-08-24T17:21:41Z"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.365870 4685 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:38Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.389389 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-li
b\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\
\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:38Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.401649 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:38Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.425960 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:38Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.431081 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.431127 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.431138 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.431154 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.431180 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:38Z","lastTransitionTime":"2026-01-28T12:21:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.443694 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf"
Jan 28 12:21:38 crc kubenswrapper[4685]: W0128 12:21:38.461849 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod94fa291c_6b2a_4a3b_b70d_def6dd28589b.slice/crio-569afffae829eb26fc52b3f1aeb99fd747be2e86b79d614c9f013aa84d013f56 WatchSource:0}: Error finding container 569afffae829eb26fc52b3f1aeb99fd747be2e86b79d614c9f013aa84d013f56: Status 404 returned error can't find the container with id 569afffae829eb26fc52b3f1aeb99fd747be2e86b79d614c9f013aa84d013f56
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.523853 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 02:41:46.788853512 +0000 UTC
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.533649 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.533719 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.533732 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.533755 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.533773 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:38Z","lastTransitionTime":"2026-01-28T12:21:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.636972 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.637015 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.637024 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.637039 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.637052 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:38Z","lastTransitionTime":"2026-01-28T12:21:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.740209 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.740269 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.740286 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.740310 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.740327 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:38Z","lastTransitionTime":"2026-01-28T12:21:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.843034 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.843077 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.843087 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.843108 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.843119 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:38Z","lastTransitionTime":"2026-01-28T12:21:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.945721 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.945799 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.945817 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.945845 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.945862 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:38Z","lastTransitionTime":"2026-01-28T12:21:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:38 crc kubenswrapper[4685]: I0128 12:21:38.958904 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" event={"ID":"94fa291c-6b2a-4a3b-b70d-def6dd28589b","Type":"ContainerStarted","Data":"569afffae829eb26fc52b3f1aeb99fd747be2e86b79d614c9f013aa84d013f56"}
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.049382 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.049431 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.049440 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.049455 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.049465 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:39Z","lastTransitionTime":"2026-01-28T12:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.152986 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.153060 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.153082 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.153112 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.153130 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:39Z","lastTransitionTime":"2026-01-28T12:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.256388 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.256442 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.256458 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.256478 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.256491 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:39Z","lastTransitionTime":"2026-01-28T12:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.288333 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-5x4kp"]
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.288763 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp"
Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.288826 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef"
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.312401 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " 
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.312554 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.312630 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:21:55.312582723 +0000 UTC m=+66.399996598 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.312715 4685 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.312717 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.312801 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:55.312778188 +0000 UTC m=+66.400192043 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.312943 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.313024 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.313252 4685 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.313327 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:55.313312602 +0000 UTC m=+66.400726467 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.313395 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.313424 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.313440 4685 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.313479 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:55.313467976 +0000 UTC m=+66.400881831 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.313408 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.313649 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.313717 4685 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.313824 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 12:21:55.313805394 +0000 UTC m=+66.401219229 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.314527 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:39Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.334369 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:39Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.348792 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:39Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.360024 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.360062 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.360077 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.360098 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.360115 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:39Z","lastTransitionTime":"2026-01-28T12:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.365164 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:39Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.386033 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:39Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.409421 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin
\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
,{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:39Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.413672 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ls7x7\" (UniqueName: \"kubernetes.io/projected/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-kube-api-access-ls7x7\") pod \"network-metrics-daemon-5x4kp\" (UID: \"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\") " pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.413771 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs\") pod \"network-metrics-daemon-5x4kp\" (UID: \"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\") " pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.428218 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:39Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.449145 4685 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:39Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.464254 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.464330 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.464346 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.464372 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.464391 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:39Z","lastTransitionTime":"2026-01-28T12:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.466273 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"reso
urce-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:39Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.491652 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:39Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.502785 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:39Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.514670 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ls7x7\" (UniqueName: \"kubernetes.io/projected/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-kube-api-access-ls7x7\") pod \"network-metrics-daemon-5x4kp\" (UID: \"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\") " pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.514781 4685 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs\") pod \"network-metrics-daemon-5x4kp\" (UID: \"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\") " pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.514932 4685 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.515015 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs podName:5f0d7b7e-1577-4289-9043-ddf8dd9a48ef nodeName:}" failed. No retries permitted until 2026-01-28 12:21:40.014993486 +0000 UTC m=+51.102407331 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs") pod "network-metrics-daemon-5x4kp" (UID: "5f0d7b7e-1577-4289-9043-ddf8dd9a48ef") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.516584 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:39Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 
12:21:39.524329 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 00:54:21.358383545 +0000 UTC Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.529053 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:39Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.532948 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ls7x7\" (UniqueName: \"kubernetes.io/projected/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-kube-api-access-ls7x7\") pod \"network-metrics-daemon-5x4kp\" (UID: \"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\") " pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.545738 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.545760 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.545782 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.546094 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.546236 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.546326 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.556613 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"
name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuberne
tes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[
{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:39Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.567803 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.567843 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.567857 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.567874 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.567885 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:39Z","lastTransitionTime":"2026-01-28T12:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.568527 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:39Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.587908 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:39Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.670383 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.670422 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.670433 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.670451 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.670462 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:39Z","lastTransitionTime":"2026-01-28T12:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.772690 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.772728 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.772740 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.772756 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.772769 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:39Z","lastTransitionTime":"2026-01-28T12:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.854579 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.854662 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.854686 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.854717 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.854743 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:39Z","lastTransitionTime":"2026-01-28T12:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.874552 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:39Z is after 
2025-08-24T17:21:41Z" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.880243 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.880309 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.880325 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.880351 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.880369 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:39Z","lastTransitionTime":"2026-01-28T12:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.903073 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:39Z is after 
2025-08-24T17:21:41Z" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.908495 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.908575 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.908599 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.908632 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.908654 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:39Z","lastTransitionTime":"2026-01-28T12:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:39 crc kubenswrapper[4685]: E0128 12:21:39.963225 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:39Z is after 
2025-08-24T17:21:41Z" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.964033 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.965861 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d"} Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.981370 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.981427 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.981442 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.981462 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:39 crc kubenswrapper[4685]: I0128 12:21:39.981477 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:39Z","lastTransitionTime":"2026-01-28T12:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:40 crc kubenswrapper[4685]: E0128 12:21:40.001704 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeByt
es\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-28T12:21:39Z is after 2025-08-24T17:21:41Z"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.005344 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.005375 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.005386 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.005401 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.005411 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:40Z","lastTransitionTime":"2026-01-28T12:21:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:40 crc kubenswrapper[4685]: E0128 12:21:40.019561 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:40Z is after 
2025-08-24T17:21:41Z" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.019667 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs\") pod \"network-metrics-daemon-5x4kp\" (UID: \"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\") " pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:21:40 crc kubenswrapper[4685]: E0128 12:21:40.019676 4685 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 12:21:40 crc kubenswrapper[4685]: E0128 12:21:40.019779 4685 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:21:40 crc kubenswrapper[4685]: E0128 12:21:40.019835 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs podName:5f0d7b7e-1577-4289-9043-ddf8dd9a48ef nodeName:}" failed. No retries permitted until 2026-01-28 12:21:41.019818404 +0000 UTC m=+52.107232239 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs") pod "network-metrics-daemon-5x4kp" (UID: "5f0d7b7e-1577-4289-9043-ddf8dd9a48ef") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.021315 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.021364 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.021381 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.021411 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.021429 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:40Z","lastTransitionTime":"2026-01-28T12:21:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.124512 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.124562 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.124577 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.124596 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.124609 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:40Z","lastTransitionTime":"2026-01-28T12:21:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.227818 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.227892 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.227915 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.227946 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.227967 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:40Z","lastTransitionTime":"2026-01-28T12:21:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.331939 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.331994 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.332008 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.332027 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.332043 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:40Z","lastTransitionTime":"2026-01-28T12:21:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
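Every "Node became not ready" heartbeat above repeats the same root cause: the runtime reports NetworkReady=false because no CNI configuration file exists in /etc/kubernetes/cni/net.d/. A readiness probe of that shape can be sketched in Go as below; the directory is the one from the log, while the glob patterns and function names are illustrative assumptions about how such a check is typically written.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cniConfigPresent reports whether any CNI network configuration file
// exists in dir; runtimes typically accept .conf, .conflist and .json
// (assumed set of extensions).
func cniConfigPresent(dir string) bool {
	for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, err := filepath.Glob(filepath.Join(dir, pattern))
		if err == nil && len(matches) > 0 {
			return true
		}
	}
	return false
}

func main() {
	dir := "/etc/kubernetes/cni/net.d/"
	if !cniConfigPresent(dir) {
		// This is the condition that keeps the node's Ready status False above.
		fmt.Printf("no CNI configuration file in %s. Has your network provider started?\n", dir)
		os.Exit(1)
	}
	fmt.Println("NetworkReady=true")
}
```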
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.434587 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.434893 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.434907 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.434926 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.434938 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:40Z","lastTransitionTime":"2026-01-28T12:21:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.525371 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 18:19:21.8063707 +0000 UTC
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.537940 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.537993 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.538009 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.538029 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.538042 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:40Z","lastTransitionTime":"2026-01-28T12:21:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
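Two different certificates are visible above: the kubelet-serving certificate that certificate_manager.go reports as valid until 2026-02-24 but already past its 2025-12-04 rotation deadline, and the webhook serving certificate that expired on 2025-08-24 and causes every x509 failure in this log. The Go sketch below reproduces both checks under stated assumptions: the NotBefore values and the 70-90% jitter used for the rotation deadline are assumptions, while the other timestamps come from the log entries themselves.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// verify mimics the crypto/x509 validity-window check whose error text
// appears throughout this log.
func verify(now, notBefore, notAfter time.Time) error {
	if now.Before(notBefore) || now.After(notAfter) {
		return fmt.Errorf("x509: certificate has expired or is not yet valid: current time %s is after %s",
			now.Format(time.RFC3339), notAfter.Format(time.RFC3339))
	}
	return nil
}

// rotationDeadline picks a jittered point at 70-90% of the validity window,
// roughly how a client-go style certificate manager schedules rotation
// (the exact policy is an assumption here).
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	window := notAfter.Sub(notBefore)
	frac := 0.7 + 0.2*rand.Float64()
	return notBefore.Add(time.Duration(float64(window) * frac))
}

func main() {
	now := time.Date(2026, 1, 28, 12, 21, 40, 0, time.UTC)

	// Webhook serving certificate: expired months before "now" (NotBefore assumed).
	webhookNotAfter := time.Date(2025, 8, 24, 17, 21, 41, 0, time.UTC)
	webhookNotBefore := webhookNotAfter.AddDate(-1, 0, 0)
	fmt.Println(verify(now, webhookNotBefore, webhookNotAfter))

	// kubelet-serving certificate: still valid, but past its rotation deadline.
	servingNotAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)
	servingNotBefore := servingNotAfter.AddDate(0, -6, 0)
	fmt.Println("rotation deadline:", rotationDeadline(servingNotBefore, servingNotAfter))
}
```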
Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.572758 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\
\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:40Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.596877 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:40Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.617047 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:40Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.635578 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:40Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.640215 4685 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.640269 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.640282 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.640301 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.640314 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:40Z","lastTransitionTime":"2026-01-28T12:21:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.652316 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:40Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.698972 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af
0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"n
ame\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIP
s\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:40Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.715781 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:40Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.734713 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:40Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.743192 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.743288 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.743304 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.743324 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.743335 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:40Z","lastTransitionTime":"2026-01-28T12:21:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.753967 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:40Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.771481 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:40Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.793371 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:40Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.804533 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:40Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.820024 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:40Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.832511 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:40Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.845562 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.845613 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.845625 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.845644 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.845656 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:40Z","lastTransitionTime":"2026-01-28T12:21:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.852645 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:40Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.866914 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:40Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.948131 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.948204 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.948218 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.948236 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.948248 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:40Z","lastTransitionTime":"2026-01-28T12:21:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.971141 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" event={"ID":"94fa291c-6b2a-4a3b-b70d-def6dd28589b","Type":"ContainerStarted","Data":"452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26"} Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.975635 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerStarted","Data":"757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12"} Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.978649 4685 generic.go:334] "Generic (PLEG): container finished" podID="6a28aa0f-04d6-471c-95f2-ef2268a29b62" containerID="c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8" exitCode=0 Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.978720 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" event={"ID":"6a28aa0f-04d6-471c-95f2-ef2268a29b62","Type":"ContainerDied","Data":"c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8"} Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.979208 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:21:40 crc kubenswrapper[4685]: I0128 12:21:40.997381 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41a
c2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:40Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.014343 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 
12:21:41.032353 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs\") pod \"network-metrics-daemon-5x4kp\" (UID: \"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\") " pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:21:41 crc kubenswrapper[4685]: E0128 12:21:41.032539 4685 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:21:41 crc kubenswrapper[4685]: E0128 12:21:41.032614 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs podName:5f0d7b7e-1577-4289-9043-ddf8dd9a48ef nodeName:}" failed. No retries permitted until 2026-01-28 12:21:43.032591959 +0000 UTC m=+54.120005794 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs") pod "network-metrics-daemon-5x4kp" (UID: "5f0d7b7e-1577-4289-9043-ddf8dd9a48ef") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.033937 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod 
\"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.051657 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.051711 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.051728 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.051750 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.051766 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:41Z","lastTransitionTime":"2026-01-28T12:21:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.064352 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z 
is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.077685 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.096223 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.113883 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.136311 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.153912 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.153950 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.153960 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.153979 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.153991 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:41Z","lastTransitionTime":"2026-01-28T12:21:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.156531 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.180409 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.195559 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.208419 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.227990 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-
28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.247134 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\
"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.257241 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.257296 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.257312 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.257331 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.257343 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:41Z","lastTransitionTime":"2026-01-28T12:21:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.263125 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.282236 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.302122 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.322191 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"
/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.335831 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.350887 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.364690 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.365006 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.365091 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.365223 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.365314 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:41Z","lastTransitionTime":"2026-01-28T12:21:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.369392 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.383477 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.397770 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.419740 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.431518 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.450639 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.464912 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.468707 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.468738 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.468746 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.468760 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.468768 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:41Z","lastTransitionTime":"2026-01-28T12:21:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.480582 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.498824 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.515313 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.526530 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 02:13:47.459935061 +0000 UTC Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.531224 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.544817 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.544900 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.544900 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.544825 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:41 crc kubenswrapper[4685]: E0128 12:21:41.544977 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:21:41 crc kubenswrapper[4685]: E0128 12:21:41.545117 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:21:41 crc kubenswrapper[4685]: E0128 12:21:41.545217 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:21:41 crc kubenswrapper[4685]: E0128 12:21:41.545288 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.545487 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:41Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.571519 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.571572 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.571583 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.571603 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.571617 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:41Z","lastTransitionTime":"2026-01-28T12:21:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.674956 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.675004 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.675015 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.675033 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.675048 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:41Z","lastTransitionTime":"2026-01-28T12:21:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.778490 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.778968 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.778983 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.779006 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.779025 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:41Z","lastTransitionTime":"2026-01-28T12:21:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.881963 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.882023 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.882045 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.882283 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.882307 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:41Z","lastTransitionTime":"2026-01-28T12:21:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.984907 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.984962 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.984981 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.985003 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:41 crc kubenswrapper[4685]: I0128 12:21:41.985016 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:41Z","lastTransitionTime":"2026-01-28T12:21:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.087587 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.087664 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.087683 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.087712 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.087739 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:42Z","lastTransitionTime":"2026-01-28T12:21:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.191323 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.191383 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.191403 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.191429 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.191446 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:42Z","lastTransitionTime":"2026-01-28T12:21:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.293611 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.293689 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.293714 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.293739 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.293754 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:42Z","lastTransitionTime":"2026-01-28T12:21:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.397210 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.397269 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.397279 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.397296 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.397308 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:42Z","lastTransitionTime":"2026-01-28T12:21:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.500130 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.500222 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.500238 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.500261 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.500275 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:42Z","lastTransitionTime":"2026-01-28T12:21:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.527847 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 15:16:59.588243099 +0000 UTC Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.603919 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.603981 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.603994 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.604018 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.604038 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:42Z","lastTransitionTime":"2026-01-28T12:21:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.707291 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.707368 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.707391 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.707423 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.707448 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:42Z","lastTransitionTime":"2026-01-28T12:21:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.810748 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.811095 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.811278 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.811458 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.811632 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:42Z","lastTransitionTime":"2026-01-28T12:21:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.915393 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.915695 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.915835 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.916016 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.916194 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:42Z","lastTransitionTime":"2026-01-28T12:21:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:42 crc kubenswrapper[4685]: I0128 12:21:42.999825 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" event={"ID":"94fa291c-6b2a-4a3b-b70d-def6dd28589b","Type":"ContainerStarted","Data":"3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500"} Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.019871 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.019963 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.019975 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.020040 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.020053 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:43Z","lastTransitionTime":"2026-01-28T12:21:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.054468 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs\") pod \"network-metrics-daemon-5x4kp\" (UID: \"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\") " pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:21:43 crc kubenswrapper[4685]: E0128 12:21:43.054688 4685 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:21:43 crc kubenswrapper[4685]: E0128 12:21:43.054771 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs podName:5f0d7b7e-1577-4289-9043-ddf8dd9a48ef nodeName:}" failed. No retries permitted until 2026-01-28 12:21:47.05474755 +0000 UTC m=+58.142161385 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs") pod "network-metrics-daemon-5x4kp" (UID: "5f0d7b7e-1577-4289-9043-ddf8dd9a48ef") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.123162 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.123239 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.123257 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.123283 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.123301 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:43Z","lastTransitionTime":"2026-01-28T12:21:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.225520 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.225598 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.225613 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.225632 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
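The nestedpendingoperations error above is the kubelet's per-volume retry backoff: the secret cannot be mounted because the "metrics-daemon-secret" object is not yet registered with this kubelet, and each consecutive failure doubles the wait before the next MountVolume attempt. The 4s shown is consistent with a fourth consecutive failure under a 500ms initial delay (0.5s, 1s, 2s, 4s). A minimal sketch of that doubling schedule, assuming an initial delay of 500ms and a cap of 2m2s; the kubelet's actual constants live in nestedpendingoperations.go and may differ:

package main

import (
	"fmt"
	"time"
)

const (
	initialDelay = 500 * time.Millisecond        // assumed initial durationBeforeRetry
	maxDelay     = 2*time.Minute + 2*time.Second // assumed cap on the backoff
)

// delayForFailure returns the wait before the retry that follows the n-th
// consecutive failure (n = 1 is the first failure).
func delayForFailure(n int) time.Duration {
	d := initialDelay
	for i := 1; i < n; i++ {
		d *= 2
		if d >= maxDelay {
			return maxDelay
		}
	}
	return d
}

func main() {
	for n := 1; n <= 5; n++ {
		fmt.Printf("failure %d -> retry in %s\n", n, delayForFailure(n))
	}
	// failure 4 -> retry in 4s, matching the durationBeforeRetry logged above.
}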
Has your network provider started?"} Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.328604 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.328791 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.328825 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.328851 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.328869 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:43Z","lastTransitionTime":"2026-01-28T12:21:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.431856 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.431897 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.431914 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.431932 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.431943 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:43Z","lastTransitionTime":"2026-01-28T12:21:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.528531 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 00:50:52.959944761 +0000 UTC Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.534340 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.534376 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.534387 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.534403 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.534412 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:43Z","lastTransitionTime":"2026-01-28T12:21:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.544895 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.544900 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.544918 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.545017 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:43 crc kubenswrapper[4685]: E0128 12:21:43.545131 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:21:43 crc kubenswrapper[4685]: E0128 12:21:43.545282 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:21:43 crc kubenswrapper[4685]: E0128 12:21:43.545418 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:21:43 crc kubenswrapper[4685]: E0128 12:21:43.545716 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.637354 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.637400 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.637408 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.637425 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.637434 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:43Z","lastTransitionTime":"2026-01-28T12:21:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.740666 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.740726 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.740736 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.740754 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.740765 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:43Z","lastTransitionTime":"2026-01-28T12:21:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.843894 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.843962 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.843980 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.844008 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.844028 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:43Z","lastTransitionTime":"2026-01-28T12:21:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.946878 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.946953 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.947005 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.947039 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:43 crc kubenswrapper[4685]: I0128 12:21:43.947063 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:43Z","lastTransitionTime":"2026-01-28T12:21:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.006840 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" event={"ID":"6a28aa0f-04d6-471c-95f2-ef2268a29b62","Type":"ContainerStarted","Data":"c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3"} Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.050784 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.050842 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.050859 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.050887 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.050907 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:44Z","lastTransitionTime":"2026-01-28T12:21:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.153809 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.153869 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.153886 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.153916 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.153932 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:44Z","lastTransitionTime":"2026-01-28T12:21:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.256536 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.256909 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.256924 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.256942 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.257050 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:44Z","lastTransitionTime":"2026-01-28T12:21:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.359625 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.359684 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.359701 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.359727 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.359744 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:44Z","lastTransitionTime":"2026-01-28T12:21:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.463076 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.463147 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.463198 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.463229 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.463253 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:44Z","lastTransitionTime":"2026-01-28T12:21:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.528928 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 07:49:14.528901301 +0000 UTC Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.565580 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.565618 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.565628 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.565645 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.565657 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:44Z","lastTransitionTime":"2026-01-28T12:21:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.668273 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.668334 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.668346 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.668366 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.668380 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:44Z","lastTransitionTime":"2026-01-28T12:21:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.771409 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.771461 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.771475 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.771494 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.771506 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:44Z","lastTransitionTime":"2026-01-28T12:21:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.874187 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.874220 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.874231 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.874254 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.874267 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:44Z","lastTransitionTime":"2026-01-28T12:21:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.976037 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.976076 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.976087 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.976105 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:44 crc kubenswrapper[4685]: I0128 12:21:44.976115 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:44Z","lastTransitionTime":"2026-01-28T12:21:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.035300 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.052691 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.068297 4685 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.080524 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.080587 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.080608 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.080634 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.080650 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:45Z","lastTransitionTime":"2026-01-28T12:21:45Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.102822 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154
edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":
\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{
\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.117425 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.136101 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.153826 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.174591 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:45Z is after 2025-08-24T17:21:41Z"
Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.183294 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.183357 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.183373 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.183397 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.183413 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:45Z","lastTransitionTime":"2026-01-28T12:21:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.194150 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa4
1ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.215340 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.230980 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:45Z is after 
2025-08-24T17:21:41Z" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.243154 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.260728 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.273918 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:45Z is after 2025-08-24T17:21:41Z"
Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.286067 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.286092 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.286100 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.286113 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.286121 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:45Z","lastTransitionTime":"2026-01-28T12:21:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.288102 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},
{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.302027 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:45Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.389096 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.389343 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.389406 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.389472 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.389528 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:45Z","lastTransitionTime":"2026-01-28T12:21:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.493221 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.493292 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.493310 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.493342 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.493360 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:45Z","lastTransitionTime":"2026-01-28T12:21:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.529458 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 11:39:50.270737915 +0000 UTC Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.544821 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.544879 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.544928 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.545437 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:45 crc kubenswrapper[4685]: E0128 12:21:45.545660 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:21:45 crc kubenswrapper[4685]: E0128 12:21:45.545791 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:21:45 crc kubenswrapper[4685]: E0128 12:21:45.545905 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:21:45 crc kubenswrapper[4685]: E0128 12:21:45.546089 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.596474 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.596515 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.596526 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.596559 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.596575 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:45Z","lastTransitionTime":"2026-01-28T12:21:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.699508 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.699563 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.699582 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.699607 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.699625 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:45Z","lastTransitionTime":"2026-01-28T12:21:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.802687 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.802802 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.802822 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.802855 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.802883 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:45Z","lastTransitionTime":"2026-01-28T12:21:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.906541 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.906622 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.906645 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.906676 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:45 crc kubenswrapper[4685]: I0128 12:21:45.906699 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:45Z","lastTransitionTime":"2026-01-28T12:21:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.010350 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.010454 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.010471 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.010505 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.010523 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:46Z","lastTransitionTime":"2026-01-28T12:21:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.022141 4685 generic.go:334] "Generic (PLEG): container finished" podID="6a28aa0f-04d6-471c-95f2-ef2268a29b62" containerID="c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3" exitCode=0
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.022216 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" event={"ID":"6a28aa0f-04d6-471c-95f2-ef2268a29b62","Type":"ContainerDied","Data":"c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3"}
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.113652 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.113720 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.113748 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.113780 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.113804 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:46Z","lastTransitionTime":"2026-01-28T12:21:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.217600 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.217648 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.217658 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.217678 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.217689 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:46Z","lastTransitionTime":"2026-01-28T12:21:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.320653 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.320722 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.320740 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.320769 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.320787 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:46Z","lastTransitionTime":"2026-01-28T12:21:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.424001 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.424082 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.424116 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.424146 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.424167 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:46Z","lastTransitionTime":"2026-01-28T12:21:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.527819 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.527884 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.527899 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.527925 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.527942 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:46Z","lastTransitionTime":"2026-01-28T12:21:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.530149 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 07:19:52.825778406 +0000 UTC
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.631364 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.631478 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.631504 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.631538 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.631562 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:46Z","lastTransitionTime":"2026-01-28T12:21:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.734066 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.734104 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.734115 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.734133 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.734144 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:46Z","lastTransitionTime":"2026-01-28T12:21:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.837196 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.837267 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.837287 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.837313 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.837331 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:46Z","lastTransitionTime":"2026-01-28T12:21:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
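The batches above repeat at roughly 100 ms intervals because the kubelet re-runs the same readiness check on every node-status tick and keeps hitting the same condition: no CNI configuration file has yet been written to /etc/kubernetes/cni/net.d/, so the runtime network stays NetworkReady=false while the network pods seen in the PLEG events are still starting. The following is a minimal Go sketch of that condition check, illustrative only and not the kubelet's actual implementation; the directory path comes from the log message, and the accepted extensions follow common CNI (libcni) conventions.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// hasCNIConfig reports whether dir holds at least one CNI config file.
// This mirrors, illustratively, the condition behind the repeated
// "no CNI configuration file in ..." messages above.
func hasCNIConfig(dir string) bool {
	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
		if matches, err := filepath.Glob(filepath.Join(dir, pat)); err == nil && len(matches) > 0 {
			return true
		}
	}
	return false
}

func main() {
	dir := "/etc/kubernetes/cni/net.d" // the directory named in the log message
	if !hasCNIConfig(dir) {
		fmt.Fprintf(os.Stderr, "network not ready: no CNI configuration file in %s\n", dir)
		os.Exit(1)
	}
	fmt.Println("network ready")
}

Once a network plugin (here, the starting ovnkube-node pod) writes its config into that directory, the check passes and the NodeNotReady batches stop.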
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.940659 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.940734 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.940758 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.940788 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:46 crc kubenswrapper[4685]: I0128 12:21:46.940811 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:46Z","lastTransitionTime":"2026-01-28T12:21:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.036131 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerStarted","Data":"4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78"}
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.044225 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.044266 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.044279 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.044299 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.044313 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:47Z","lastTransitionTime":"2026-01-28T12:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.071857 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.099352 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs\") pod \"network-metrics-daemon-5x4kp\" (UID: \"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\") " pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:21:47 crc kubenswrapper[4685]: E0128 12:21:47.099773 4685 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:21:47 crc kubenswrapper[4685]: E0128 12:21:47.099933 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs podName:5f0d7b7e-1577-4289-9043-ddf8dd9a48ef nodeName:}" failed. No retries permitted until 2026-01-28 12:21:55.099903147 +0000 UTC m=+66.187316982 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs") pod "network-metrics-daemon-5x4kp" (UID: "5f0d7b7e-1577-4289-9043-ddf8dd9a48ef") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.105000 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"n
ame\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.128824 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.147905 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.147968 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.147985 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.148009 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.148027 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:47Z","lastTransitionTime":"2026-01-28T12:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.149824 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.170040 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e2
7753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.188466 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.205774 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.219835 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.231842 4685 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.244092 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.250817 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.250851 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.250860 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.250877 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.250887 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:47Z","lastTransitionTime":"2026-01-28T12:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.261220 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"
ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"c
ri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.273660 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"
name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.286419 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.298529 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.311590 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.324227 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:47Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.353931 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.353978 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.353992 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.354014 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.354029 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:47Z","lastTransitionTime":"2026-01-28T12:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.457197 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.457253 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.457265 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.457287 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.457299 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:47Z","lastTransitionTime":"2026-01-28T12:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.530419 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 16:52:32.270105456 +0000 UTC
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.545097 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.545137 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.545142 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.545234 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp"
Jan 28 12:21:47 crc kubenswrapper[4685]: E0128 12:21:47.545285 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 12:21:47 crc kubenswrapper[4685]: E0128 12:21:47.545403 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef"
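Every "Failed to update status for pod" record in this stretch fails the same way: the kubelet's Go TLS client rejects the network-node-identity webhook at 127.0.0.1:9743 because the serving certificate's NotAfter (2025-08-24T17:21:41Z) is behind the node clock (2026-01-28). A minimal diagnostic sketch of that check, assuming the address from the log; InsecureSkipVerify is used only so the handshake survives long enough to read the peer certificate, not to trust it:

    package main

    import (
        "crypto/tls"
        "fmt"
        "time"
    )

    func main() {
        // Address taken from the log records above; adjust as needed.
        conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
            InsecureSkipVerify: true, // diagnostic only: fetch the cert, don't verify it
        })
        if err != nil {
            fmt.Println("dial:", err)
            return
        }
        defer conn.Close()
        now := time.Now()
        for _, cert := range conn.ConnectionState().PeerCertificates {
            fmt.Printf("subject=%v notBefore=%s notAfter=%s\n",
                cert.Subject, cert.NotBefore.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
            if now.After(cert.NotAfter) || now.Before(cert.NotBefore) {
                // Same condition the log reports as
                // "x509: certificate has expired or is not yet valid".
                fmt.Printf("invalid at current time %s\n", now.Format(time.RFC3339))
            }
        }
    }

Until that certificate is rotated or re-issued, every status patch the kubelet sends will keep failing with the identical x509 error, which appears to be why the same message repeats for each pod below.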
Jan 28 12:21:47 crc kubenswrapper[4685]: E0128 12:21:47.545545 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 12:21:47 crc kubenswrapper[4685]: E0128 12:21:47.545699 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.560830 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.560917 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.560942 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.560972 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.560996 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:47Z","lastTransitionTime":"2026-01-28T12:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.664414 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.664475 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.664492 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.664516 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.664538 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:47Z","lastTransitionTime":"2026-01-28T12:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.767831 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.767912 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.767931 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.767957 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.767976 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:47Z","lastTransitionTime":"2026-01-28T12:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.872117 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.872161 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.872188 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.872208 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.872221 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:47Z","lastTransitionTime":"2026-01-28T12:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.974940 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.974996 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.975014 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.975037 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
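The condition={...} payload in the setters.go:603 records is plain JSON and can be pulled apart directly. A small sketch that decodes one of the payloads above; the struct mirrors only the fields visible in this log, not the full Kubernetes NodeCondition type:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // nodeCondition covers just the fields printed in the records above.
    type nodeCondition struct {
        Type               string `json:"type"`
        Status             string `json:"status"`
        LastHeartbeatTime  string `json:"lastHeartbeatTime"`
        LastTransitionTime string `json:"lastTransitionTime"`
        Reason             string `json:"reason"`
        Message            string `json:"message"`
    }

    func main() {
        // Payload copied verbatim from one of the setters.go:603 records above.
        raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:47Z","lastTransitionTime":"2026-01-28T12:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`

        var c nodeCondition
        if err := json.Unmarshal([]byte(raw), &c); err != nil {
            fmt.Println("unmarshal:", err)
            return
        }
        fmt.Printf("node %s=%s reason=%s\n", c.Type, c.Status, c.Reason)
    }

The message field is what ties the Ready=False condition to the missing CNI setup: the kubelet keeps reporting the node NotReady until a network plugin writes a config file into /etc/kubernetes/cni/net.d/.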
Jan 28 12:21:47 crc kubenswrapper[4685]: I0128 12:21:47.975052 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:47Z","lastTransitionTime":"2026-01-28T12:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.047351 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerStarted","Data":"652bdf7f127fcf5dd5a0437f3abc3c3a190dd28030c99a396622aff0a86676e9"}
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.057067 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" event={"ID":"6a28aa0f-04d6-471c-95f2-ef2268a29b62","Type":"ContainerStarted","Data":"ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7"}
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.081443 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.081533 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.081544 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.081567 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.081578 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:48Z","lastTransitionTime":"2026-01-28T12:21:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.185667 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.185702 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.185712 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.185726 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.185737 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:48Z","lastTransitionTime":"2026-01-28T12:21:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.288522 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.288584 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.288602 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.288629 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.288652 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:48Z","lastTransitionTime":"2026-01-28T12:21:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.392763 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.392834 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.392857 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.392884 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.392900 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:48Z","lastTransitionTime":"2026-01-28T12:21:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.495840 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.495909 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.495929 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.495959 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.495978 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:48Z","lastTransitionTime":"2026-01-28T12:21:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.531567 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 23:47:32.705256405 +0000 UTC
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.598194 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.598264 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.598285 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.598310 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.598329 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:48Z","lastTransitionTime":"2026-01-28T12:21:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.702252 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.702300 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.702316 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.702341 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
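Note that the two certificate_manager.go:356 records (12:21:47.530419 and 12:21:48.531567) report the same expiration but different rotation deadlines. That is consistent with client-go's certificate manager picking a jittered deadline, somewhere between 70% and 90% of the certificate's lifetime past NotBefore, each time it evaluates rotation. A sketch under that assumption; the NotBefore value is invented for illustration, since the log only prints the expiration:

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // rotationDeadline mimics the jittered deadline: NotBefore plus a
    // uniformly random 70%-90% of the certificate's total lifetime.
    func rotationDeadline(notBefore, notAfter time.Time) time.Time {
        total := notAfter.Sub(notBefore)
        jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
        return notBefore.Add(jittered)
    }

    func main() {
        // Expiration taken from the certificate_manager.go:356 records above;
        // NotBefore is an assumed one-year-earlier issue time.
        notBefore := time.Date(2025, 2, 24, 5, 53, 3, 0, time.UTC)
        notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)
        for i := 0; i < 2; i++ {
            fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter).Format(time.RFC3339))
        }
    }

Each evaluation re-rolls the jitter, which would explain the drift between the two logged deadlines for the same certificate.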
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.702358 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:48Z","lastTransitionTime":"2026-01-28T12:21:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.805739 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.805794 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.805812 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.805839 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.805855 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:48Z","lastTransitionTime":"2026-01-28T12:21:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.908564 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.908621 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.908639 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.908658 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:48 crc kubenswrapper[4685]: I0128 12:21:48.908670 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:48Z","lastTransitionTime":"2026-01-28T12:21:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.011477 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.011542 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.011556 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.011576 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.011590 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:49Z","lastTransitionTime":"2026-01-28T12:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.076285 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.095195 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.115409 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.115505 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.115526 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.115551 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.115569 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:49Z","lastTransitionTime":"2026-01-28T12:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.117295 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.135688 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 
12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.161728 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\
\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",
\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.184749 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.200731 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.218647 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.218681 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.218690 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.218706 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.218715 4685 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:49Z","lastTransitionTime":"2026-01-28T12:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.219604 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":
\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.241219 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z 
is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.256022 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.268990 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.281766 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.294336 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.306938 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.319056 4685 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.320617 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.320652 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.320660 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.320673 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.320682 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:49Z","lastTransitionTime":"2026-01-28T12:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.329381 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.341977 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.360841 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.379098 4685 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.394993 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.407396 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.423816 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.423868 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.423882 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.423901 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.423915 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:49Z","lastTransitionTime":"2026-01-28T12:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.435772 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOn
ly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d
2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://652bdf7f127fcf5dd5a0437f3abc3c3a190dd28030c99a396622aff0a86676e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\
"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.447374 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.462526 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.480265 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.497579 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.512003 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.526515 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.526582 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.526600 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.526627 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.526645 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:49Z","lastTransitionTime":"2026-01-28T12:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.531524 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.531764 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 19:06:38.855445637 +0000 UTC Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.545031 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:21:49 crc kubenswrapper[4685]: E0128 12:21:49.545213 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.545234 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.545314 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:49 crc kubenswrapper[4685]: E0128 12:21:49.545399 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.545451 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:49 crc kubenswrapper[4685]: E0128 12:21:49.545575 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:21:49 crc kubenswrapper[4685]: E0128 12:21:49.545797 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.545808 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.556919 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.572480 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/
var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3
\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.586654 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:49Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.629899 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.629926 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.629934 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.629948 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.629957 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:49Z","lastTransitionTime":"2026-01-28T12:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.733371 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.733453 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.733476 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.733508 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.733530 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:49Z","lastTransitionTime":"2026-01-28T12:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.837467 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.837537 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.837554 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.837581 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.837600 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:49Z","lastTransitionTime":"2026-01-28T12:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.940427 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.940491 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.940509 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.940532 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:49 crc kubenswrapper[4685]: I0128 12:21:49.940552 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:49Z","lastTransitionTime":"2026-01-28T12:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.043551 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.043918 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.043939 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.043972 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.043991 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:50Z","lastTransitionTime":"2026-01-28T12:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.064354 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.064417 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.064435 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.064459 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.064476 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:50Z","lastTransitionTime":"2026-01-28T12:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:50 crc kubenswrapper[4685]: E0128 12:21:50.083092 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.088099 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.088149 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.088161 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.088202 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.088226 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:50Z","lastTransitionTime":"2026-01-28T12:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:50 crc kubenswrapper[4685]: E0128 12:21:50.107118 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.112337 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.112429 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.112448 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.112476 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.112494 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:50Z","lastTransitionTime":"2026-01-28T12:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:50 crc kubenswrapper[4685]: E0128 12:21:50.132051 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.137052 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.137110 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.137128 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.137148 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.137166 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:50Z","lastTransitionTime":"2026-01-28T12:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:50 crc kubenswrapper[4685]: E0128 12:21:50.152810 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.156462 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.156502 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.156518 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.156538 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.156553 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:50Z","lastTransitionTime":"2026-01-28T12:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:50 crc kubenswrapper[4685]: E0128 12:21:50.172117 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: E0128 12:21:50.172282 4685 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.174057 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.174094 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.174103 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.174117 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.174128 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:50Z","lastTransitionTime":"2026-01-28T12:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.276739 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.276778 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.276791 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.276809 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.276823 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:50Z","lastTransitionTime":"2026-01-28T12:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.379390 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.379433 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.379442 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.379456 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.379465 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:50Z","lastTransitionTime":"2026-01-28T12:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.482915 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.482985 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.483003 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.483027 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.483048 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:50Z","lastTransitionTime":"2026-01-28T12:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.531903 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 22:33:38.295169102 +0000 UTC Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.565277 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.582082 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.586841 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.586974 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.587025 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.587051 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.587068 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:50Z","lastTransitionTime":"2026-01-28T12:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.603576 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.620506 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.638626 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.657683 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.672841 4685 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.685400 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.690380 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.690425 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.690441 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.690470 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.690488 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:50Z","lastTransitionTime":"2026-01-28T12:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.709650 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOn
ly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d
2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://652bdf7f127fcf5dd5a0437f3abc3c3a190dd28030c99a396622aff0a86676e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\
"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.722885 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.739390 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.757126 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.773920 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.791217 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.792995 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.793036 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.793051 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.793076 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.793093 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:50Z","lastTransitionTime":"2026-01-28T12:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.816493 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:
21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.832449 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:50Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.896700 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.896768 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.896785 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.896810 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:50 crc kubenswrapper[4685]: I0128 12:21:50.896826 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:50Z","lastTransitionTime":"2026-01-28T12:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.000370 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.000442 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.000461 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.000487 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.000507 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:51Z","lastTransitionTime":"2026-01-28T12:21:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.102967 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.103109 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.103129 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.103155 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.103210 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:51Z","lastTransitionTime":"2026-01-28T12:21:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.205925 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.206010 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.206049 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.206083 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.206105 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:51Z","lastTransitionTime":"2026-01-28T12:21:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.308299 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.308409 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.308419 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.308436 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.308464 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:51Z","lastTransitionTime":"2026-01-28T12:21:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.410996 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.411056 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.411073 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.411101 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.411119 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:51Z","lastTransitionTime":"2026-01-28T12:21:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.514267 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.514315 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.514323 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.514338 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.514346 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:51Z","lastTransitionTime":"2026-01-28T12:21:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.532555 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 18:24:54.273356436 +0000 UTC Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.544971 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.545078 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:21:51 crc kubenswrapper[4685]: E0128 12:21:51.545142 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.544980 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:51 crc kubenswrapper[4685]: E0128 12:21:51.545231 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:21:51 crc kubenswrapper[4685]: E0128 12:21:51.545282 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.545364 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:51 crc kubenswrapper[4685]: E0128 12:21:51.545447 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.617972 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.618051 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.618076 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.618108 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.618135 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:51Z","lastTransitionTime":"2026-01-28T12:21:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.721032 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.721117 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.721142 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.721221 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.721252 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:51Z","lastTransitionTime":"2026-01-28T12:21:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.824144 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.824241 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.824257 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.824282 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.824303 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:51Z","lastTransitionTime":"2026-01-28T12:21:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.927632 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.927691 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.927707 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.927732 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:51 crc kubenswrapper[4685]: I0128 12:21:51.927749 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:51Z","lastTransitionTime":"2026-01-28T12:21:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.030767 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.030812 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.030821 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.030840 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.030852 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:52Z","lastTransitionTime":"2026-01-28T12:21:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.135690 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.135753 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.135770 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.135794 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.135812 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:52Z","lastTransitionTime":"2026-01-28T12:21:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.239022 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.239085 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.239101 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.239124 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.239141 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:52Z","lastTransitionTime":"2026-01-28T12:21:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.323068 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.332932 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.337111 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:52Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.344549 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.344581 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.344612 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.344629 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.344639 4685 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:52Z","lastTransitionTime":"2026-01-28T12:21:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.351886 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:52Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.369768 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:52Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.383654 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:52Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.408978 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\
",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log
-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://652bdf7f127fcf5dd5a0437f3abc3c3a190dd28030c99a396622aff0a86676e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\
\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:52Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.426114 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:52Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.445150 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:52Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.447243 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.447305 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.447317 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.447337 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.447365 4685 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:52Z","lastTransitionTime":"2026-01-28T12:21:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.463368 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:52Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.480216 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:52Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.498016 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:52Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.511758 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:52Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.526388 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:52Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.533554 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 20:37:10.794911344 +0000 UTC Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.541945 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21
:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:52Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.550382 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.550451 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.550470 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.550494 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.550511 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:52Z","lastTransitionTime":"2026-01-28T12:21:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.554813 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:52Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.567277 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:52Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.577691 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:52Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.654052 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.654124 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.654141 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.654191 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.654222 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:52Z","lastTransitionTime":"2026-01-28T12:21:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.757671 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.757731 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.757743 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.757787 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.757801 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:52Z","lastTransitionTime":"2026-01-28T12:21:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.861663 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.861736 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.861749 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.861772 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.861786 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:52Z","lastTransitionTime":"2026-01-28T12:21:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.965413 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.965493 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.965507 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.965527 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:52 crc kubenswrapper[4685]: I0128 12:21:52.965541 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:52Z","lastTransitionTime":"2026-01-28T12:21:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.068489 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.068526 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.068534 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.068552 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.068561 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:53Z","lastTransitionTime":"2026-01-28T12:21:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.081564 4685 generic.go:334] "Generic (PLEG): container finished" podID="6a28aa0f-04d6-471c-95f2-ef2268a29b62" containerID="ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7" exitCode=0 Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.081619 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" event={"ID":"6a28aa0f-04d6-471c-95f2-ef2268a29b62","Type":"ContainerDied","Data":"ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7"} Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.103277 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d357ec17-9216-429a-95b6-f1b12fe0315b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d39b1a67b3e35f25fd74df829ea3e77dbf70af50eec20396256657b7d7843f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d6ee7dca5cc5555e11d6e2550d13cd6386cbf84f722d6783cb9bb108ad60401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae76144067f7839771549d4c80cd4b908d3d2e64b73655c5ee92749af7a3a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8
dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.122724 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.143591 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.158098 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.171665 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.171715 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.171734 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.171759 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.171777 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:53Z","lastTransitionTime":"2026-01-28T12:21:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.186644 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.215089 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mo
untPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.238696 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.258964 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.273764 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.274448 4685 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.274477 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.274486 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.274501 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.274511 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:53Z","lastTransitionTime":"2026-01-28T12:21:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.285538 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.305958 4685 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.323773 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.347506 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.369803 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\
",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log
-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://652bdf7f127fcf5dd5a0437f3abc3c3a190dd28030c99a396622aff0a86676e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\
\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.383891 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.383926 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.383937 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.383954 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.383964 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:53Z","lastTransitionTime":"2026-01-28T12:21:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.390047 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.410947 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.430393 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.473809 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.487319 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.487372 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.487386 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.487408 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.487422 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:53Z","lastTransitionTime":"2026-01-28T12:21:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.491789 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d357ec17-9216-429a-95b6-f1b12fe0315b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d39b1a67b3e35f25fd74df829ea3e77dbf70af50eec20396256657b7d7843f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d6ee7dca5cc5555e11d6e2550d13cd6386cbf84f722d6783cb9bb108ad60401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae76144067f7839771549d4c80cd4b908d3d2e64b73655c5ee92749af7a3a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.510956 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.525475 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.534567 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 19:44:48.438439078 +0000 UTC Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.535905 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2a
f0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.544948 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.545004 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.544963 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:53 crc kubenswrapper[4685]: E0128 12:21:53.545087 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.545005 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:53 crc kubenswrapper[4685]: E0128 12:21:53.545206 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:21:53 crc kubenswrapper[4685]: E0128 12:21:53.545244 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:21:53 crc kubenswrapper[4685]: E0128 12:21:53.545301 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.549019 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.561947 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cni
bin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":tr
ue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.574309 4685 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.587304 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.589564 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.589590 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.589599 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.589615 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.589626 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:53Z","lastTransitionTime":"2026-01-28T12:21:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.601390 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.612097 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.625090 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.637297 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.649129 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.662151 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.674199 4685 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.687066 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.692006 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.692037 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.692045 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.692060 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.692069 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:53Z","lastTransitionTime":"2026-01-28T12:21:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.706412 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOn
ly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d
2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://652bdf7f127fcf5dd5a0437f3abc3c3a190dd28030c99a396622aff0a86676e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\
"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:53Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.795428 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.795480 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.795497 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.795521 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.795542 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:53Z","lastTransitionTime":"2026-01-28T12:21:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.899067 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.899130 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.899150 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.899203 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:53 crc kubenswrapper[4685]: I0128 12:21:53.899225 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:53Z","lastTransitionTime":"2026-01-28T12:21:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.001698 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.001737 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.001746 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.001760 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.001769 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:54Z","lastTransitionTime":"2026-01-28T12:21:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.104795 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.105159 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.105380 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.105523 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.105661 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:54Z","lastTransitionTime":"2026-01-28T12:21:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.208964 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.209005 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.209025 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.209049 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.209069 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:54Z","lastTransitionTime":"2026-01-28T12:21:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.311563 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.311617 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.311629 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.311649 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.311663 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:54Z","lastTransitionTime":"2026-01-28T12:21:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.415449 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.415512 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.415539 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.415566 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.415588 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:54Z","lastTransitionTime":"2026-01-28T12:21:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.519300 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.519342 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.519354 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.519372 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.519385 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:54Z","lastTransitionTime":"2026-01-28T12:21:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.535703 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 18:03:43.044145759 +0000 UTC Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.621854 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.621912 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.621923 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.621942 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.621957 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:54Z","lastTransitionTime":"2026-01-28T12:21:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.730538 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.730664 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.730690 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.730721 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.730750 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:54Z","lastTransitionTime":"2026-01-28T12:21:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.834143 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.834285 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.834316 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.834351 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.834377 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:54Z","lastTransitionTime":"2026-01-28T12:21:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.969701 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.969770 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.969785 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.969809 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:54 crc kubenswrapper[4685]: I0128 12:21:54.969822 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:54Z","lastTransitionTime":"2026-01-28T12:21:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.073946 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.073992 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.074001 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.074016 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.074026 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:55Z","lastTransitionTime":"2026-01-28T12:21:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.092393 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" event={"ID":"6a28aa0f-04d6-471c-95f2-ef2268a29b62","Type":"ContainerStarted","Data":"c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d"} Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.107981 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs\") pod \"network-metrics-daemon-5x4kp\" (UID: \"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\") " pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:21:55 crc kubenswrapper[4685]: E0128 12:21:55.108214 4685 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:21:55 crc kubenswrapper[4685]: E0128 12:21:55.108326 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs podName:5f0d7b7e-1577-4289-9043-ddf8dd9a48ef nodeName:}" failed. No retries permitted until 2026-01-28 12:22:11.10830776 +0000 UTC m=+82.195721595 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs") pod "network-metrics-daemon-5x4kp" (UID: "5f0d7b7e-1577-4289-9043-ddf8dd9a48ef") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.109444 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.120616 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.140739 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\
\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\
"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://652bdf7f127fcf5dd5a0437f3abc3c3a190dd28030c99a396622aff0a86676e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.156750 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.175855 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.176491 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.176538 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.176548 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.176562 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.176571 4685 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:55Z","lastTransitionTime":"2026-01-28T12:21:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.202730 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.228785 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.251883 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.267190 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.279476 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.279536 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.279547 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.279568 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.279579 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:55Z","lastTransitionTime":"2026-01-28T12:21:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.283007 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.298024 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:55 crc 
kubenswrapper[4685]: I0128 12:21:55.314770 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d357ec17-9216-429a-95b6-f1b12fe0315b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d39b1a67b3e35f25fd74df829ea3e77dbf70af50eec20396256657b7d7843f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d6ee7dca5cc5555e11d6e2550d13cd6386cbf84f722d6783cb9bb108ad60401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae76144067f7839771549d4c80cd4b908d3d2e64b73655c5ee92749af7a3a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\
\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.331044 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.348746 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.362081 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.373253 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.381814 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.381850 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.381861 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.381878 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.381889 4685 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:55Z","lastTransitionTime":"2026-01-28T12:21:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.384022 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":
\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:55Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.411425 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:21:55 crc kubenswrapper[4685]: E0128 12:21:55.411554 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:27.411535353 +0000 UTC m=+98.498949188 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.411668 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.411708 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.411742 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.411769 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:55 
crc kubenswrapper[4685]: E0128 12:21:55.411834 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:21:55 crc kubenswrapper[4685]: E0128 12:21:55.411853 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:21:55 crc kubenswrapper[4685]: E0128 12:21:55.411864 4685 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:21:55 crc kubenswrapper[4685]: E0128 12:21:55.411875 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:21:55 crc kubenswrapper[4685]: E0128 12:21:55.411881 4685 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:21:55 crc kubenswrapper[4685]: E0128 12:21:55.411891 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:21:55 crc kubenswrapper[4685]: E0128 12:21:55.411988 4685 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:21:55 crc kubenswrapper[4685]: E0128 12:21:55.411986 4685 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:21:55 crc kubenswrapper[4685]: E0128 12:21:55.411902 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 12:22:27.411893572 +0000 UTC m=+98.499307407 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:21:55 crc kubenswrapper[4685]: E0128 12:21:55.412052 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:22:27.412031306 +0000 UTC m=+98.499445151 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:21:55 crc kubenswrapper[4685]: E0128 12:21:55.412072 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 12:22:27.412064617 +0000 UTC m=+98.499478462 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:21:55 crc kubenswrapper[4685]: E0128 12:21:55.412090 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:22:27.412081577 +0000 UTC m=+98.499495422 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.485721 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.485789 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.485801 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.485820 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.485832 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:55Z","lastTransitionTime":"2026-01-28T12:21:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.536233 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 17:04:39.295936292 +0000 UTC Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.544821 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.544847 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.544927 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.544989 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:55 crc kubenswrapper[4685]: E0128 12:21:55.545153 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:21:55 crc kubenswrapper[4685]: E0128 12:21:55.545444 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:21:55 crc kubenswrapper[4685]: E0128 12:21:55.545667 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:21:55 crc kubenswrapper[4685]: E0128 12:21:55.545880 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.588610 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.588651 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.588660 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.588674 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.588684 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:55Z","lastTransitionTime":"2026-01-28T12:21:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.691512 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.691579 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.691596 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.691622 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.691640 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:55Z","lastTransitionTime":"2026-01-28T12:21:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.795490 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.795563 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.795594 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.795627 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.795649 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:55Z","lastTransitionTime":"2026-01-28T12:21:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.898318 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.898372 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.898399 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.898414 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:55 crc kubenswrapper[4685]: I0128 12:21:55.898426 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:55Z","lastTransitionTime":"2026-01-28T12:21:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.001014 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.001069 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.001091 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.001124 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.001148 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:56Z","lastTransitionTime":"2026-01-28T12:21:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.098235 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-b85rl_6fd64f35-81dc-4978-84e8-a746e9a79ccd/ovnkube-controller/0.log" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.102572 4685 generic.go:334] "Generic (PLEG): container finished" podID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerID="652bdf7f127fcf5dd5a0437f3abc3c3a190dd28030c99a396622aff0a86676e9" exitCode=1 Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.102704 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerDied","Data":"652bdf7f127fcf5dd5a0437f3abc3c3a190dd28030c99a396622aff0a86676e9"} Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.102928 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.102967 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.102984 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.103004 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.103023 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:56Z","lastTransitionTime":"2026-01-28T12:21:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.104673 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.104731 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.105103 4685 scope.go:117] "RemoveContainer" containerID="652bdf7f127fcf5dd5a0437f3abc3c3a190dd28030c99a396622aff0a86676e9" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.107860 4685 generic.go:334] "Generic (PLEG): container finished" podID="6a28aa0f-04d6-471c-95f2-ef2268a29b62" containerID="c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d" exitCode=0 Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.107920 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" event={"ID":"6a28aa0f-04d6-471c-95f2-ef2268a29b62","Type":"ContainerDied","Data":"c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d"} Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.131700 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.137012 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.142199 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.157793 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiv
eReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.176224 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.191840 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c
239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.204073 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.205688 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.205713 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.205723 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.205740 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.205751 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:56Z","lastTransitionTime":"2026-01-28T12:21:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.273502 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.289665 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.303989 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.308488 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.308521 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.308532 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.308550 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.308562 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:56Z","lastTransitionTime":"2026-01-28T12:21:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.315546 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.336229 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\
\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\
"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://652bdf7f127fcf5dd5a0437f3abc3c3a190dd28030c99a396622aff0a86676e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://652bdf7f127fcf5dd5a0437f3abc3c3a190dd28030c99a396622aff0a86676e9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"message\\\":\\\"y.go:160\\\\nI0128 12:21:55.274903 5990 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 12:21:55.274959 5990 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 12:21:55.275073 5990 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275377 5990 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275465 5990 factory.go:656] Stopping watch factory\\\\nI0128 12:21:55.275636 5990 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275686 5990 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275852 5990 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.276036 5990 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23
bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.348268 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d357ec17-9216-429a-95b6-f1b12fe0315b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d39b1a67b3e35f25fd74df829ea3e77dbf70af50eec20396256657b7d7843f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d6ee7dca5cc5555e11d6e2550d13cd6386cbf84f722d6783cb9bb108ad60401\\\",\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae76144067f7839771549d4c80cd4b908d3d2e64b73655c5ee92749af7a3a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.362024 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.376035 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.387884 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\
\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.401422 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.411424 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.411485 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.411505 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.411535 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.411556 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:56Z","lastTransitionTime":"2026-01-28T12:21:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.420777 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsv
g6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.436543 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.456534 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.478877 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.500397 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.514809 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.514886 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.514900 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.514920 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.514934 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:56Z","lastTransitionTime":"2026-01-28T12:21:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.518082 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.536504 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 22:59:59.486236568 +0000 UTC Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.549625 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://652bdf7f127fcf5dd5a0437f3abc3c3a190dd28030c99a396622aff0a86676e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://652bdf7f127fcf5dd5a0437f3abc3c3a190dd28030c99a396622aff0a86676e9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"message\\\":\\\"y.go:160\\\\nI0128 12:21:55.274903 5990 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 12:21:55.274959 5990 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 12:21:55.275073 5990 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275377 5990 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275465 5990 factory.go:656] Stopping watch factory\\\\nI0128 12:21:55.275636 5990 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275686 5990 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275852 5990 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.276036 5990 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23b
fd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.566604 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\
\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.584958 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.607231 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.617106 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.617153 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.617205 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.617238 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.617262 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:56Z","lastTransitionTime":"2026-01-28T12:21:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.626816 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d357ec17-9216-429a-95b6-f1b12fe0315b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d39b1a67b3e35f25fd74df829ea3e77dbf70af50eec20396256657b7d7843f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d6ee7dca5cc5555e11d6e2550d13cd6386cbf84f722d6783cb9bb108ad60401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae76144067f7839771549d4c80cd4b908d3d2e64b73655c5ee92749af7a3a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.648452 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.667092 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.688737 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.703261 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.721061 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.721134 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.721147 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.721206 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.721220 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:56Z","lastTransitionTime":"2026-01-28T12:21:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.722566 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f
89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\
\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.742622 4685 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.762133 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.785888 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:56Z is after 2025-08-24T17:21:41Z"
Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.828648 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.828705 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.828722 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.828746 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.828764 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:56Z","lastTransitionTime":"2026-01-28T12:21:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.932617 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.932692 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.932711 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.932738 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:56 crc kubenswrapper[4685]: I0128 12:21:56.932757 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:56Z","lastTransitionTime":"2026-01-28T12:21:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.035324 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.035356 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.035366 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.035383 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.035392 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:57Z","lastTransitionTime":"2026-01-28T12:21:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.069284 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl"
Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.112766 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-b85rl_6fd64f35-81dc-4978-84e8-a746e9a79ccd/ovnkube-controller/0.log"
Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.114941 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerStarted","Data":"84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437"}
Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.115466 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl"
Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.121332 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" event={"ID":"6a28aa0f-04d6-471c-95f2-ef2268a29b62","Type":"ContainerStarted","Data":"9aab7e5498efc3a955171be7c657be8188a9338e34ae0a2315fa7372efbb5f71"}
Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.133381 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec7
935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z"
Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.145228 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.145278 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.145292 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.145317 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.145331 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:57Z","lastTransitionTime":"2026-01-28T12:21:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.156953 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.170941 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.182989 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.197640 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{
\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.211315 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.222383 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.244382 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://652bdf7f127fcf5dd5a0437f3abc3c3a190dd28030c99a396622aff0a86676e9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"message\\\":\\\"y.go:160\\\\nI0128 12:21:55.274903 5990 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 12:21:55.274959 5990 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 12:21:55.275073 5990 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275377 5990 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275465 5990 factory.go:656] Stopping watch factory\\\\nI0128 12:21:55.275636 5990 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275686 5990 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275852 5990 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.276036 5990 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.12
6.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.250335 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.250383 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.250397 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.250418 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.250433 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:57Z","lastTransitionTime":"2026-01-28T12:21:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.260716 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.279103 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.293917 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.309517 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.330987 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.345583 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.356492 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.356547 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.356558 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.356581 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.356594 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:57Z","lastTransitionTime":"2026-01-28T12:21:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.363737 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.377551 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc 
kubenswrapper[4685]: I0128 12:21:57.393858 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d357ec17-9216-429a-95b6-f1b12fe0315b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d39b1a67b3e35f25fd74df829ea3e77dbf70af50eec20396256657b7d7843f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d6ee7dca5cc5555e11d6e2550d13cd6386cbf84f722d6783cb9bb108ad60401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae76144067f7839771549d4c80cd4b908d3d2e64b73655c5ee92749af7a3a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\
\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.413094 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\
"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.431603 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e
27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.454120 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.459394 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.459482 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.459508 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.459544 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.459570 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:57Z","lastTransitionTime":"2026-01-28T12:21:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.477690 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.500364 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.516163 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.538717 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 20:58:33.835895496 +0000 UTC Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.538894 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.545069 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.545138 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.545161 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:57 crc kubenswrapper[4685]: E0128 12:21:57.545196 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:21:57 crc kubenswrapper[4685]: E0128 12:21:57.545293 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:21:57 crc kubenswrapper[4685]: E0128 12:21:57.545408 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.545430 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:21:57 crc kubenswrapper[4685]: E0128 12:21:57.545624 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.562111 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84f54b424ac0e95384e1b9f78d021b85629fe342
9495d95ff52c7349d7c87437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://652bdf7f127fcf5dd5a0437f3abc3c3a190dd28030c99a396622aff0a86676e9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"message\\\":\\\"y.go:160\\\\nI0128 12:21:55.274903 5990 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 12:21:55.274959 5990 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 12:21:55.275073 5990 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275377 5990 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275465 5990 factory.go:656] Stopping watch factory\\\\nI0128 12:21:55.275636 5990 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275686 5990 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275852 5990 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.276036 5990 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.12
6.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.562978 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.563018 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.563035 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.563058 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.563076 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:57Z","lastTransitionTime":"2026-01-28T12:21:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.577883 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d357ec17-9216-429a-95b6-f1b12fe0315b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d39b1a67b3e35f25fd74df829ea3e77dbf70af50eec20396256657b7d7843f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d6ee7dca5cc5555e11d6e2550d13cd6386cbf84f722d6783cb9bb108ad60401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae76144067f7839771549d4c80cd4b908d3d2e64b73655c5ee92749af7a3a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.597215 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.615502 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.640370 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.657545 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.665977 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.666016 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.666027 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.666044 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.666055 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:57Z","lastTransitionTime":"2026-01-28T12:21:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.683747 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aab7e5498efc3a955171be7c657be8188a9338e34ae0a2315fa7372efbb5f71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.703447 4685 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.718415 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.735418 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:57Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.772367 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.772441 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.772451 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.772471 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.772482 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:57Z","lastTransitionTime":"2026-01-28T12:21:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.875308 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.875351 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.875364 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.875384 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.875397 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:57Z","lastTransitionTime":"2026-01-28T12:21:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.978023 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.978053 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.978060 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.978074 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:57 crc kubenswrapper[4685]: I0128 12:21:57.978083 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:57Z","lastTransitionTime":"2026-01-28T12:21:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.080516 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.080556 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.080567 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.080584 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.080595 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:58Z","lastTransitionTime":"2026-01-28T12:21:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.183000 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.183029 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.183037 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.183051 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.183060 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:58Z","lastTransitionTime":"2026-01-28T12:21:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.285024 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.285053 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.285065 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.285081 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.285092 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:58Z","lastTransitionTime":"2026-01-28T12:21:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.387671 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.387710 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.387721 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.387737 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.387748 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:58Z","lastTransitionTime":"2026-01-28T12:21:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.490248 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.490318 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.490336 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.490366 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.490385 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:58Z","lastTransitionTime":"2026-01-28T12:21:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.539500 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 19:49:44.536631072 +0000 UTC Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.593265 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.593343 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.593368 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.593393 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.593412 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:58Z","lastTransitionTime":"2026-01-28T12:21:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.697022 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.697067 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.697078 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.697095 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.697106 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:58Z","lastTransitionTime":"2026-01-28T12:21:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.799442 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.799477 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.799488 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.799504 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.799515 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:58Z","lastTransitionTime":"2026-01-28T12:21:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.902526 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.902568 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.902581 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.902599 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:58 crc kubenswrapper[4685]: I0128 12:21:58.902611 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:58Z","lastTransitionTime":"2026-01-28T12:21:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.005579 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.005662 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.005682 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.005711 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.005730 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:59Z","lastTransitionTime":"2026-01-28T12:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.108681 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.108728 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.108748 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.108771 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.108787 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:59Z","lastTransitionTime":"2026-01-28T12:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.130543 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-b85rl_6fd64f35-81dc-4978-84e8-a746e9a79ccd/ovnkube-controller/1.log" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.131148 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-b85rl_6fd64f35-81dc-4978-84e8-a746e9a79ccd/ovnkube-controller/0.log" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.135299 4685 generic.go:334] "Generic (PLEG): container finished" podID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerID="84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437" exitCode=1 Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.135332 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerDied","Data":"84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437"} Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.135365 4685 scope.go:117] "RemoveContainer" containerID="652bdf7f127fcf5dd5a0437f3abc3c3a190dd28030c99a396622aff0a86676e9" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.136522 4685 scope.go:117] "RemoveContainer" containerID="84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437" Jan 28 12:21:59 crc kubenswrapper[4685]: E0128 12:21:59.136805 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-b85rl_openshift-ovn-kubernetes(6fd64f35-81dc-4978-84e8-a746e9a79ccd)\"" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.151800 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aab7e5498efc3a955171be7c657be8188a9338e34ae0a2315fa7372efbb5f71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.173187 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.188146 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.202155 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.211802 4685 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.211858 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.211872 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.211890 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.211902 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:59Z","lastTransitionTime":"2026-01-28T12:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.224122 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.239952 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.257277 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.274305 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.289708 4685 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.301322 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.314865 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.314922 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.314940 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.314964 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.314982 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:59Z","lastTransitionTime":"2026-01-28T12:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.325329 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://652bdf7f127fcf5dd5a0437f3abc3c3a190dd28030c99a396622aff0a86676e9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"message\\\":\\\"y.go:160\\\\nI0128 12:21:55.274903 5990 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 12:21:55.274959 5990 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 12:21:55.275073 5990 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275377 5990 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275465 5990 factory.go:656] Stopping watch factory\\\\nI0128 12:21:55.275636 5990 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275686 5990 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275852 5990 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.276036 5990 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:21:58Z\\\",\\\"message\\\":\\\"_uuid == {ba175bbe-5cc4-47e6-a32d-57693e1320bd}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.280794 6218 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.280851 6218 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.279481 6218 services_controller.go:360] Finished syncing service console on namespace openshift-console for network=default : 5.55701ms\\\\nI0128 12:21:58.280949 6218 services_controller.go:356] Processing sync for service openshift-authentication-operator/metrics for network=default\\\\nF0128 12:21:58.280973 6218 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.337400 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.16
8.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.350835 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d357ec17-9216-429a-95b6-f1b12fe0315b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d39b1a67b3e35f25fd74df829ea3e77dbf70af50eec20396256657b7d7843f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d6ee7dca5cc5555e11d6e2550d13cd6386cbf84f722d6783cb9bb108ad60401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae76144067f7839771549d4c80cd4b908d3d2e64b73655c5ee92749af7a3a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c
7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.366324 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.379507 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.390693 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.411506 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:21:59Z is after 2025-08-24T17:21:41Z" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.417836 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.418013 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.418109 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.418208 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.418297 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:59Z","lastTransitionTime":"2026-01-28T12:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.526321 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.526626 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.526713 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.526818 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.526912 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:59Z","lastTransitionTime":"2026-01-28T12:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.540558 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 01:08:41.103247254 +0000 UTC Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.544986 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.544986 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:21:59 crc kubenswrapper[4685]: E0128 12:21:59.545151 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.544998 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:21:59 crc kubenswrapper[4685]: E0128 12:21:59.545209 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:21:59 crc kubenswrapper[4685]: E0128 12:21:59.545289 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.544986 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:21:59 crc kubenswrapper[4685]: E0128 12:21:59.545409 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.629321 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.629377 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.629388 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.629406 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.629418 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:59Z","lastTransitionTime":"2026-01-28T12:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.732788 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.732850 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.732867 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.732892 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.732911 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:59Z","lastTransitionTime":"2026-01-28T12:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.835771 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.836049 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.836247 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.836352 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.836585 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:59Z","lastTransitionTime":"2026-01-28T12:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.939194 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.939227 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.939238 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.939254 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:21:59 crc kubenswrapper[4685]: I0128 12:21:59.939264 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:21:59Z","lastTransitionTime":"2026-01-28T12:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.041429 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.041496 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.041514 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.041539 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.041555 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:00Z","lastTransitionTime":"2026-01-28T12:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.141532 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-b85rl_6fd64f35-81dc-4978-84e8-a746e9a79ccd/ovnkube-controller/1.log" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.144390 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.144434 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.144456 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.144491 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.144546 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:00Z","lastTransitionTime":"2026-01-28T12:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.185716 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.185759 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.185770 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.185787 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.185798 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:00Z","lastTransitionTime":"2026-01-28T12:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:00 crc kubenswrapper[4685]: E0128 12:22:00.198510 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.203001 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.203409 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.203582 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.203725 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.203900 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:00Z","lastTransitionTime":"2026-01-28T12:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:00 crc kubenswrapper[4685]: E0128 12:22:00.226884 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.232648 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.232997 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.233210 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.233424 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.233608 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:00Z","lastTransitionTime":"2026-01-28T12:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:00 crc kubenswrapper[4685]: E0128 12:22:00.254145 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.260260 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.260331 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.260357 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.260389 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.260412 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:00Z","lastTransitionTime":"2026-01-28T12:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:00 crc kubenswrapper[4685]: E0128 12:22:00.280005 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.284345 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.284377 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.284389 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.284407 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.284420 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:00Z","lastTransitionTime":"2026-01-28T12:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:00 crc kubenswrapper[4685]: E0128 12:22:00.301510 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:00 crc kubenswrapper[4685]: E0128 12:22:00.302276 4685 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.304840 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.304881 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.304891 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.304910 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.304922 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:00Z","lastTransitionTime":"2026-01-28T12:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.408020 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.408285 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.408375 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.408450 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.408524 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:00Z","lastTransitionTime":"2026-01-28T12:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.511258 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.511296 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.511308 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.511327 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.511339 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:00Z","lastTransitionTime":"2026-01-28T12:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.540739 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 09:24:52.452115452 +0000 UTC
Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.563241 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z"
Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.574782 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 
12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.585846 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.600656 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d357ec17-9216-429a-95b6-f1b12fe0315b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d39b1a67b3e35f25fd74df829ea3e77dbf70af50eec20396256657b7d7843f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d6ee7dca5cc5555e11d6e2550d13cd6386cbf84f722d6783cb9bb108ad60401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae76144067f7839771549d4c80cd4b908d3d2e64b73655c5ee92749af7a3a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.612651 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.614360 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.614411 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.614425 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.614447 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.614460 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:00Z","lastTransitionTime":"2026-01-28T12:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.629036 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aab7e5498efc3a955171be7c657be8188a9338e34ae0a2315fa7372efbb5f71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.659015 4685 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.671285 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.689684 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.704572 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.717648 4685 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.717687 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.717702 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.717722 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.717629 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.717740 4685 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:00Z","lastTransitionTime":"2026-01-28T12:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.752127 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\
":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://652bdf7f127fcf5dd5a0437f3abc3c3a190dd28030c99a396622aff0a86676e9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"message\\\":\\\"y.go:160\\\\nI0128 12:21:55.274903 5990 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 12:21:55.274959 5990 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 12:21:55.275073 5990 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275377 5990 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275465 5990 factory.go:656] Stopping watch factory\\\\nI0128 12:21:55.275636 5990 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275686 5990 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275852 5990 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.276036 5990 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:21:58Z\\\",\\\"message\\\":\\\"_uuid == {ba175bbe-5cc4-47e6-a32d-57693e1320bd}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.280794 6218 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.280851 6218 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.279481 6218 services_controller.go:360] Finished syncing service console on namespace openshift-console for network=default : 5.55701ms\\\\nI0128 12:21:58.280949 6218 services_controller.go:356] Processing sync for service openshift-authentication-operator/metrics for network=default\\\\nF0128 12:21:58.280973 6218 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.769062 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.16
8.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.786485 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc
478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.800922 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.814773 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.820118 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.820178 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.820192 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.820212 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.820226 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:00Z","lastTransitionTime":"2026-01-28T12:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.828767 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa4
1ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:00Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.923196 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.923250 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.923259 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.923275 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:00 crc kubenswrapper[4685]: I0128 12:22:00.923285 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:00Z","lastTransitionTime":"2026-01-28T12:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.026369 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.026413 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.026423 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.026452 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.026462 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:01Z","lastTransitionTime":"2026-01-28T12:22:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.129350 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.129404 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.129415 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.129433 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.129448 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:01Z","lastTransitionTime":"2026-01-28T12:22:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.232439 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.232474 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.232482 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.232497 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.232506 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:01Z","lastTransitionTime":"2026-01-28T12:22:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.335077 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.335123 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.335134 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.335152 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.335162 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:01Z","lastTransitionTime":"2026-01-28T12:22:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.437497 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.437537 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.437544 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.437559 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.437568 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:01Z","lastTransitionTime":"2026-01-28T12:22:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.539683 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.539720 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.539729 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.539743 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.539752 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:01Z","lastTransitionTime":"2026-01-28T12:22:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.541887 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 03:41:03.743975564 +0000 UTC Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.545540 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.545644 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.545574 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:22:01 crc kubenswrapper[4685]: E0128 12:22:01.545979 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:22:01 crc kubenswrapper[4685]: E0128 12:22:01.546043 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:22:01 crc kubenswrapper[4685]: E0128 12:22:01.546132 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.546350 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:22:01 crc kubenswrapper[4685]: E0128 12:22:01.546478 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.643093 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.643349 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.643422 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.643526 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.643595 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:01Z","lastTransitionTime":"2026-01-28T12:22:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.746401 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.746430 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.746440 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.746456 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.746467 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:01Z","lastTransitionTime":"2026-01-28T12:22:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.849091 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.849234 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.849258 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.849287 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.849305 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:01Z","lastTransitionTime":"2026-01-28T12:22:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.951703 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.952218 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.952305 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.952411 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:01 crc kubenswrapper[4685]: I0128 12:22:01.952487 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:01Z","lastTransitionTime":"2026-01-28T12:22:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.054527 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.054579 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.054594 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.054617 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.054631 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:02Z","lastTransitionTime":"2026-01-28T12:22:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.156935 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.156978 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.156989 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.157004 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.157021 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:02Z","lastTransitionTime":"2026-01-28T12:22:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.259750 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.259834 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.259857 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.259893 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.259921 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:02Z","lastTransitionTime":"2026-01-28T12:22:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.362903 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.362951 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.362962 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.362980 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.362991 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:02Z","lastTransitionTime":"2026-01-28T12:22:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.465943 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.465987 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.465999 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.466017 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.466029 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:02Z","lastTransitionTime":"2026-01-28T12:22:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.542071 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 03:56:51.754808446 +0000 UTC Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.569673 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.569737 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.569752 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.569778 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.569794 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:02Z","lastTransitionTime":"2026-01-28T12:22:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.672786 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.672838 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.672850 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.672870 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.672881 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:02Z","lastTransitionTime":"2026-01-28T12:22:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.776488 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.776567 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.776594 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.776628 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.776651 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:02Z","lastTransitionTime":"2026-01-28T12:22:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.880879 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.880927 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.880939 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.880960 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.880974 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:02Z","lastTransitionTime":"2026-01-28T12:22:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.984302 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.984352 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.984369 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.984389 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:02 crc kubenswrapper[4685]: I0128 12:22:02.984406 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:02Z","lastTransitionTime":"2026-01-28T12:22:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.086981 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.087042 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.087067 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.087098 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.087121 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:03Z","lastTransitionTime":"2026-01-28T12:22:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.190851 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.190902 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.190918 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.190936 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.190947 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:03Z","lastTransitionTime":"2026-01-28T12:22:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.293477 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.293529 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.293547 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.293573 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.293590 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:03Z","lastTransitionTime":"2026-01-28T12:22:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.397108 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.397162 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.397192 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.397216 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.397227 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:03Z","lastTransitionTime":"2026-01-28T12:22:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.500557 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.500619 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.500641 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.500667 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.500685 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:03Z","lastTransitionTime":"2026-01-28T12:22:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.542602 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 03:07:56.407133422 +0000 UTC Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.544934 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.544980 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.545031 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:22:03 crc kubenswrapper[4685]: E0128 12:22:03.545140 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.545221 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:22:03 crc kubenswrapper[4685]: E0128 12:22:03.545371 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:22:03 crc kubenswrapper[4685]: E0128 12:22:03.545804 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:22:03 crc kubenswrapper[4685]: E0128 12:22:03.545956 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.604028 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.604083 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.604097 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.604114 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.604125 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:03Z","lastTransitionTime":"2026-01-28T12:22:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.706614 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.706677 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.706697 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.706720 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.706734 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:03Z","lastTransitionTime":"2026-01-28T12:22:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.809057 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.809100 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.809111 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.809128 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.809141 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:03Z","lastTransitionTime":"2026-01-28T12:22:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.911581 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.911639 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.911651 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.911668 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:03 crc kubenswrapper[4685]: I0128 12:22:03.911680 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:03Z","lastTransitionTime":"2026-01-28T12:22:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.014190 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.014238 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.014248 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.014266 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.014282 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:04Z","lastTransitionTime":"2026-01-28T12:22:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.116697 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.116738 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.116747 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.116761 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.116770 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:04Z","lastTransitionTime":"2026-01-28T12:22:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.219588 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.219628 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.219638 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.219655 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.219666 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:04Z","lastTransitionTime":"2026-01-28T12:22:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.322120 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.322160 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.322190 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.322207 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.322218 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:04Z","lastTransitionTime":"2026-01-28T12:22:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.424484 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.424747 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.424819 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.424883 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.424949 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:04Z","lastTransitionTime":"2026-01-28T12:22:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.527902 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.527946 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.527962 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.527986 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.528003 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:04Z","lastTransitionTime":"2026-01-28T12:22:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.543180 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 10:26:34.112084342 +0000 UTC Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.630304 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.630372 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.630391 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.630419 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.630441 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:04Z","lastTransitionTime":"2026-01-28T12:22:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.733204 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.733265 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.733282 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.733312 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.733333 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:04Z","lastTransitionTime":"2026-01-28T12:22:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.835326 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.835382 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.835400 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.835423 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.835442 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:04Z","lastTransitionTime":"2026-01-28T12:22:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.938255 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.938305 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.938322 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.938344 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:04 crc kubenswrapper[4685]: I0128 12:22:04.938388 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:04Z","lastTransitionTime":"2026-01-28T12:22:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.042312 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.042350 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.042362 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.042377 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.042389 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:05Z","lastTransitionTime":"2026-01-28T12:22:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.145335 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.145403 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.145424 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.145452 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.145470 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:05Z","lastTransitionTime":"2026-01-28T12:22:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.248462 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.248520 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.248529 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.248547 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.248557 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:05Z","lastTransitionTime":"2026-01-28T12:22:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.351105 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.351149 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.351159 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.351199 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.351214 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:05Z","lastTransitionTime":"2026-01-28T12:22:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.453865 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.454157 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.454194 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.454215 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.454228 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:05Z","lastTransitionTime":"2026-01-28T12:22:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.543459 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 03:45:21.128185954 +0000 UTC Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.545745 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.545850 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.546023 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.546122 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:22:05 crc kubenswrapper[4685]: E0128 12:22:05.546227 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:22:05 crc kubenswrapper[4685]: E0128 12:22:05.546371 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:22:05 crc kubenswrapper[4685]: E0128 12:22:05.546474 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:22:05 crc kubenswrapper[4685]: E0128 12:22:05.546626 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.556862 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.556903 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.556988 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.557012 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.557025 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:05Z","lastTransitionTime":"2026-01-28T12:22:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.557583 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.660219 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.660259 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.660267 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.660303 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.660315 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:05Z","lastTransitionTime":"2026-01-28T12:22:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.763082 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.763152 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.763184 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.763219 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.763231 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:05Z","lastTransitionTime":"2026-01-28T12:22:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.865973 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.866038 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.866060 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.866086 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.866104 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:05Z","lastTransitionTime":"2026-01-28T12:22:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.968823 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.968890 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.968903 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.968941 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:05 crc kubenswrapper[4685]: I0128 12:22:05.968982 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:05Z","lastTransitionTime":"2026-01-28T12:22:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.071448 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.071504 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.071519 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.071535 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.071547 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:06Z","lastTransitionTime":"2026-01-28T12:22:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.174285 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.174355 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.174370 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.174386 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.174397 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:06Z","lastTransitionTime":"2026-01-28T12:22:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.277016 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.277060 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.277070 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.277086 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.277096 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:06Z","lastTransitionTime":"2026-01-28T12:22:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.379279 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.379345 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.379368 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.379400 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.379423 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:06Z","lastTransitionTime":"2026-01-28T12:22:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.481993 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.482040 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.482050 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.482069 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.482079 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:06Z","lastTransitionTime":"2026-01-28T12:22:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.544411 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 16:01:03.684411531 +0000 UTC Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.583754 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.583794 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.583805 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.583823 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.583835 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:06Z","lastTransitionTime":"2026-01-28T12:22:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.685883 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.685945 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.685957 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.685975 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.685987 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:06Z","lastTransitionTime":"2026-01-28T12:22:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.791953 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.792013 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.792032 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.792060 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.792077 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:06Z","lastTransitionTime":"2026-01-28T12:22:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.894711 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.894839 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.894861 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.894885 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.894947 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:06Z","lastTransitionTime":"2026-01-28T12:22:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.999093 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.999140 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.999153 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.999191 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:06 crc kubenswrapper[4685]: I0128 12:22:06.999202 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:06Z","lastTransitionTime":"2026-01-28T12:22:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.101459 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.101507 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.101517 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.101534 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.101549 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:07Z","lastTransitionTime":"2026-01-28T12:22:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.205718 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.205746 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.205755 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.205770 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.205779 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:07Z","lastTransitionTime":"2026-01-28T12:22:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.308365 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.308402 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.308412 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.308425 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.308435 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:07Z","lastTransitionTime":"2026-01-28T12:22:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.411338 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.411393 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.411410 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.411439 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.411457 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:07Z","lastTransitionTime":"2026-01-28T12:22:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.513816 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.513854 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.513900 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.513940 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.513965 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:07Z","lastTransitionTime":"2026-01-28T12:22:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.544888 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 16:45:41.114899813 +0000 UTC Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.545004 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.545071 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.545156 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:22:07 crc kubenswrapper[4685]: E0128 12:22:07.545151 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.545229 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:22:07 crc kubenswrapper[4685]: E0128 12:22:07.545342 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:22:07 crc kubenswrapper[4685]: E0128 12:22:07.545537 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:22:07 crc kubenswrapper[4685]: E0128 12:22:07.545593 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.616327 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.616381 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.616394 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.616413 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.616427 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:07Z","lastTransitionTime":"2026-01-28T12:22:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.718761 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.718836 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.718849 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.718867 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.718879 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:07Z","lastTransitionTime":"2026-01-28T12:22:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.821992 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.822058 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.822076 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.822099 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.822115 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:07Z","lastTransitionTime":"2026-01-28T12:22:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.924493 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.924555 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.924566 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.924583 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:07 crc kubenswrapper[4685]: I0128 12:22:07.924596 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:07Z","lastTransitionTime":"2026-01-28T12:22:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.027999 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.028044 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.028056 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.028074 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.028084 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:08Z","lastTransitionTime":"2026-01-28T12:22:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.130420 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.130483 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.130501 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.130524 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.130542 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:08Z","lastTransitionTime":"2026-01-28T12:22:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.234358 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.234434 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.234460 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.234492 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.234511 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:08Z","lastTransitionTime":"2026-01-28T12:22:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.337507 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.337585 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.337602 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.337631 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.337647 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:08Z","lastTransitionTime":"2026-01-28T12:22:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.440282 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.440343 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.440360 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.440384 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.440400 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:08Z","lastTransitionTime":"2026-01-28T12:22:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.543925 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.543994 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.544013 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.544300 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.544344 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:08Z","lastTransitionTime":"2026-01-28T12:22:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.545068 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 13:26:24.257622172 +0000 UTC Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.647515 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.647563 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.647576 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.647594 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.647605 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:08Z","lastTransitionTime":"2026-01-28T12:22:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.750467 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.750515 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.750533 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.750553 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.750568 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:08Z","lastTransitionTime":"2026-01-28T12:22:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.853321 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.853358 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.853377 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.853397 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.853407 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:08Z","lastTransitionTime":"2026-01-28T12:22:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.955984 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.956026 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.956038 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.956057 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:08 crc kubenswrapper[4685]: I0128 12:22:08.956069 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:08Z","lastTransitionTime":"2026-01-28T12:22:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.058865 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.058915 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.058930 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.058949 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.058962 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:09Z","lastTransitionTime":"2026-01-28T12:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.161466 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.161532 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.161548 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.161566 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.161579 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:09Z","lastTransitionTime":"2026-01-28T12:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.264320 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.264383 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.264403 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.264431 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.264448 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:09Z","lastTransitionTime":"2026-01-28T12:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.367280 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.367333 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.367346 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.367365 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.367378 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:09Z","lastTransitionTime":"2026-01-28T12:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.470998 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.471040 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.471048 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.471067 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.471078 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:09Z","lastTransitionTime":"2026-01-28T12:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.545637 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 15:51:15.913737756 +0000 UTC
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.545705 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.545758 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.545792 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp"
Jan 28 12:22:09 crc kubenswrapper[4685]: E0128 12:22:09.545869 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.545787 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 12:22:09 crc kubenswrapper[4685]: E0128 12:22:09.545922 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 12:22:09 crc kubenswrapper[4685]: E0128 12:22:09.546042 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 12:22:09 crc kubenswrapper[4685]: E0128 12:22:09.546249 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.573900 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.573942 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.573955 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.573971 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.573983 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:09Z","lastTransitionTime":"2026-01-28T12:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.677255 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.677310 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.677327 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.677348 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.677362 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:09Z","lastTransitionTime":"2026-01-28T12:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.780602 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.780638 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.780648 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.780665 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.780676 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:09Z","lastTransitionTime":"2026-01-28T12:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.882698 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.882896 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.882917 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.882992 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.883009 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:09Z","lastTransitionTime":"2026-01-28T12:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.985220 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.985264 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.985278 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.985295 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:09 crc kubenswrapper[4685]: I0128 12:22:09.985309 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:09Z","lastTransitionTime":"2026-01-28T12:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.087764 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.087833 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.087856 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.087923 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.087953 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:10Z","lastTransitionTime":"2026-01-28T12:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.194708 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.194743 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.194752 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.194766 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.194776 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:10Z","lastTransitionTime":"2026-01-28T12:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.296639 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.296728 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.296751 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.296782 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.296807 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:10Z","lastTransitionTime":"2026-01-28T12:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.399651 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.399695 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.399706 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.399721 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.399734 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:10Z","lastTransitionTime":"2026-01-28T12:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.502692 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.502755 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.502772 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.502800 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.502824 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:10Z","lastTransitionTime":"2026-01-28T12:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.546143 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 01:18:00.811473501 +0000 UTC
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.561284 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.581576 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.602910 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.606434 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.606502 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.606521 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.606546 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.606563 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:10Z","lastTransitionTime":"2026-01-28T12:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.624795 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.642804 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.654877 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.676853 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://652bdf7f127fcf5dd5a0437f3abc3c3a190dd28030c99a396622aff0a86676e9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"message\\\":\\\"y.go:160\\\\nI0128 12:21:55.274903 5990 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 12:21:55.274959 5990 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 12:21:55.275073 5990 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275377 5990 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275465 5990 factory.go:656] Stopping watch factory\\\\nI0128 12:21:55.275636 5990 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275686 5990 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.275852 5990 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 12:21:55.276036 5990 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:21:58Z\\\",\\\"message\\\":\\\"_uuid == {ba175bbe-5cc4-47e6-a32d-57693e1320bd}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.280794 6218 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.280851 6218 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.279481 6218 services_controller.go:360] Finished syncing service console on namespace openshift-console for network=default : 5.55701ms\\\\nI0128 12:21:58.280949 6218 services_controller.go:356] Processing sync for service openshift-authentication-operator/metrics for network=default\\\\nF0128 12:21:58.280973 6218 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.687282 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.687342 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.687360 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.687386 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.687404 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:10Z","lastTransitionTime":"2026-01-28T12:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.688040 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z"
Jan 28 12:22:10 crc kubenswrapper[4685]: E0128 12:22:10.699821 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\
":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.703266 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-
crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.704549 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.704593 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.704605 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.704625 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.704670 4685 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:10Z","lastTransitionTime":"2026-01-28T12:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:10 crc kubenswrapper[4685]: E0128 12:22:10.717153 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.718051 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.721405 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.721457 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.721467 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.721484 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.721494 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:10Z","lastTransitionTime":"2026-01-28T12:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.730338 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d357ec17-9216-429a-95b6-f1b12fe0315b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d39b1a67b3e35f25fd74df829ea3e77dbf70af50eec20396256657b7d7843f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d6ee7dca5cc5555e11d6e2550d13cd6386cbf84f722d6783cb9bb108ad60401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae76144067f7839771549d4c80cd4b908d3d2e64b73655c5ee92749af7a3a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:10 crc kubenswrapper[4685]: E0128 12:22:10.733484 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.739072 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.739122 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.739136 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.739155 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.739189 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:10Z","lastTransitionTime":"2026-01-28T12:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.747797 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:10 crc kubenswrapper[4685]: E0128 12:22:10.754400 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.760073 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.760114 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.760124 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.760138 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.760150 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:10Z","lastTransitionTime":"2026-01-28T12:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.763013 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z"
Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.773537 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z"
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:10 crc kubenswrapper[4685]: E0128 12:22:10.773532 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:10 crc kubenswrapper[4685]: E0128 12:22:10.773638 4685 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.775076 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.775107 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.775117 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.775132 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.775142 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:10Z","lastTransitionTime":"2026-01-28T12:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.783371 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.793310 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee77451-6566-4b66-9de7-cd679ec96556\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd096013bc79dbef2af62d85f0bde6bb4227ef5f1e4b19e8d54f46c56aa8e28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0dbc5b6b55278dfd36841f37e2e6eaaff502fc0d660a0c1ad1e31a2d1c0305ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0dbc5b6b55278dfd36841f37e2e6eaaff502fc0d660a0c1ad1e31a2d1c0305ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.806721 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aab7e5498efc3a955171be7c657be8188a9338e34ae0a2315fa7372efbb5f71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.817764 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:10Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.877988 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.878037 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.878052 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.878074 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.878088 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:10Z","lastTransitionTime":"2026-01-28T12:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.981709 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.981768 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.981781 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.981802 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:10 crc kubenswrapper[4685]: I0128 12:22:10.981815 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:10Z","lastTransitionTime":"2026-01-28T12:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.084041 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.084099 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.084119 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.084144 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.084161 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:11Z","lastTransitionTime":"2026-01-28T12:22:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.183085 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs\") pod \"network-metrics-daemon-5x4kp\" (UID: \"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\") " pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:22:11 crc kubenswrapper[4685]: E0128 12:22:11.183297 4685 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:22:11 crc kubenswrapper[4685]: E0128 12:22:11.183375 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs podName:5f0d7b7e-1577-4289-9043-ddf8dd9a48ef nodeName:}" failed. No retries permitted until 2026-01-28 12:22:43.183353527 +0000 UTC m=+114.270767362 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs") pod "network-metrics-daemon-5x4kp" (UID: "5f0d7b7e-1577-4289-9043-ddf8dd9a48ef") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.186662 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.186778 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.186804 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.186881 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.186904 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:11Z","lastTransitionTime":"2026-01-28T12:22:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.289928 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.290000 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.290027 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.290059 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.290080 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:11Z","lastTransitionTime":"2026-01-28T12:22:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.392787 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.392837 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.392855 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.392880 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.392897 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:11Z","lastTransitionTime":"2026-01-28T12:22:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.495563 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.495618 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.495629 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.495648 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.495668 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:11Z","lastTransitionTime":"2026-01-28T12:22:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.545646 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.545673 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:22:11 crc kubenswrapper[4685]: E0128 12:22:11.545881 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.545907 4685 util.go:30] "No sandbox for pod can be found. 
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.545971 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 12:22:11 crc kubenswrapper[4685]: E0128 12:22:11.546137 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 12:22:11 crc kubenswrapper[4685]: E0128 12:22:11.546282 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 12:22:11 crc kubenswrapper[4685]: E0128 12:22:11.546355 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef"
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.546332 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 18:08:57.308068519 +0000 UTC
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.597804 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.598127 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.598144 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.598237 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.598280 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:11Z","lastTransitionTime":"2026-01-28T12:22:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.709202 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.709245 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.709256 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.709273 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.709285 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:11Z","lastTransitionTime":"2026-01-28T12:22:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.812117 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.812160 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.812192 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.812209 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.812223 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:11Z","lastTransitionTime":"2026-01-28T12:22:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.914605 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.914645 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.914658 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.914672 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:11 crc kubenswrapper[4685]: I0128 12:22:11.914682 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:11Z","lastTransitionTime":"2026-01-28T12:22:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.017721 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.017768 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.017783 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.017799 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.017810 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:12Z","lastTransitionTime":"2026-01-28T12:22:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.128395 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.128456 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.128466 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.128481 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.128513 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:12Z","lastTransitionTime":"2026-01-28T12:22:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.232161 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.232275 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.232294 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.232319 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.232337 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:12Z","lastTransitionTime":"2026-01-28T12:22:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.337585 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.337699 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.337719 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.337746 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.337772 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:12Z","lastTransitionTime":"2026-01-28T12:22:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.440592 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.440649 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.440665 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.440689 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.440706 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:12Z","lastTransitionTime":"2026-01-28T12:22:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.544215 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.544278 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.544299 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.544330 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.544350 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:12Z","lastTransitionTime":"2026-01-28T12:22:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.546443 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 10:58:49.024605263 +0000 UTC
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.647277 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.647339 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.647358 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.647385 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.647406 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:12Z","lastTransitionTime":"2026-01-28T12:22:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.750559 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.750623 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.750640 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.750665 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.750680 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:12Z","lastTransitionTime":"2026-01-28T12:22:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.853575 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.853633 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.853651 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.853680 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.853699 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:12Z","lastTransitionTime":"2026-01-28T12:22:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.956923 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.956985 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.957005 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.957033 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:12 crc kubenswrapper[4685]: I0128 12:22:12.957054 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:12Z","lastTransitionTime":"2026-01-28T12:22:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.060739 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.060827 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.060848 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.060878 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.060900 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:13Z","lastTransitionTime":"2026-01-28T12:22:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.163834 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.163913 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.163931 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.163960 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.163979 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:13Z","lastTransitionTime":"2026-01-28T12:22:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.266921 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.266990 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.267006 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.267033 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.267053 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:13Z","lastTransitionTime":"2026-01-28T12:22:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.370767 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.370837 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.370860 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.370888 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.370905 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:13Z","lastTransitionTime":"2026-01-28T12:22:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.473867 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.473933 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.473951 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.473978 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.474000 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:13Z","lastTransitionTime":"2026-01-28T12:22:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.545791 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.545844 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.545844 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.546355 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 12:22:13 crc kubenswrapper[4685]: E0128 12:22:13.546546 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.546631 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-18 03:49:24.412412588 +0000 UTC
Jan 28 12:22:13 crc kubenswrapper[4685]: E0128 12:22:13.546737 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 12:22:13 crc kubenswrapper[4685]: E0128 12:22:13.546860 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 12:22:13 crc kubenswrapper[4685]: E0128 12:22:13.546885 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.547030 4685 scope.go:117] "RemoveContainer" containerID="84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.577098 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aab7e5498efc3a955171be7c657be8188a9338e34ae0a2315fa7372efbb5f71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:13Z is after 2025-08-24T17:21:41Z"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.579035 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.579112 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.579137 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.579203 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.579231 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:13Z","lastTransitionTime":"2026-01-28T12:22:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.600675 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:13Z is after 2025-08-24T17:21:41Z"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.620816 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:13Z is after 2025-08-24T17:21:41Z"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.644626 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:13Z is after 2025-08-24T17:21:41Z"
Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.679037 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:21:58Z\\\",\\\"message\\\":\\\"_uuid == {ba175bbe-5cc4-47e6-a32d-57693e1320bd}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.280794 6218 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.280851 6218 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.279481 6218 services_controller.go:360] Finished syncing service console on namespace openshift-console for network=default : 5.55701ms\\\\nI0128 12:21:58.280949 6218 services_controller.go:356] Processing sync for service openshift-authentication-operator/metrics for network=default\\\\nF0128 12:21:58.280973 6218 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-b85rl_openshift-ovn-kubernetes(6fd64f35-81dc-4978-84e8-a746e9a79ccd)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:13Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.681807 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.681936 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.682034 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.682146 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.682300 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:13Z","lastTransitionTime":"2026-01-28T12:22:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.693480 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:13Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.705975 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:13Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.724094 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:13Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.738747 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:13Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.754607 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:13Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.775091 4685 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:13Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.785640 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.785707 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.785720 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.785739 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.785750 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:13Z","lastTransitionTime":"2026-01-28T12:22:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.786120 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:13Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.797473 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:13Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.808234 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee77451-6566-4b66-9de7-cd679ec96556\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd096013bc79dbef2af62d85f0bde6bb4227ef5f1e4b19e8d54f46c56aa8e28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0dbc5b6b55278dfd36841f37e2e6eaaff502fc0d660a0c1ad1e31a2d1c0305ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0dbc5b6b55278dfd36841f37e2e6eaaff502fc0d660a0c1ad1e31a2d1c0305ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:13Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.820840 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d357ec17-9216-429a-95b6-f1b12fe0315b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d39b1a67b3e35f25fd74df829ea3e77dbf70af50eec20396256657b7d7843f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d6ee7dca5cc5555e11d6e2550d13cd6386cbf84f722d6783cb9bb108ad60401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae76144067f7839771549d4c80cd4b908d3d2e64b73655c5ee92749af7a3a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:13Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.838061 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:13Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.871629 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:13Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.905548 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.905625 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.905648 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.905680 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.905704 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:13Z","lastTransitionTime":"2026-01-28T12:22:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:13 crc kubenswrapper[4685]: I0128 12:22:13.914456 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:13Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.008957 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.009015 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.009027 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.009044 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.009057 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:14Z","lastTransitionTime":"2026-01-28T12:22:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.111749 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.111809 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.111821 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.111840 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.111856 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:14Z","lastTransitionTime":"2026-01-28T12:22:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.214999 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.215079 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.215096 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.215126 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.215147 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:14Z","lastTransitionTime":"2026-01-28T12:22:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.318121 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.318235 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.318257 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.318284 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.318303 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:14Z","lastTransitionTime":"2026-01-28T12:22:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.429159 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.429274 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.429295 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.429323 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.429341 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:14Z","lastTransitionTime":"2026-01-28T12:22:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.532285 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.532335 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.532358 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.532382 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.532430 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:14Z","lastTransitionTime":"2026-01-28T12:22:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.547479 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 21:56:17.908718459 +0000 UTC Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.656835 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.656877 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.656892 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.656913 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.656925 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:14Z","lastTransitionTime":"2026-01-28T12:22:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.759941 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.760008 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.760026 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.760052 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.760069 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:14Z","lastTransitionTime":"2026-01-28T12:22:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.862466 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.862523 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.862540 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.862566 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.862585 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:14Z","lastTransitionTime":"2026-01-28T12:22:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.966093 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.966149 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.966165 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.966229 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:14 crc kubenswrapper[4685]: I0128 12:22:14.966252 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:14Z","lastTransitionTime":"2026-01-28T12:22:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.069967 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.070034 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.070053 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.070078 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.070100 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:15Z","lastTransitionTime":"2026-01-28T12:22:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.172884 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.172943 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.172962 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.172988 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.173008 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:15Z","lastTransitionTime":"2026-01-28T12:22:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.202444 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-b85rl_6fd64f35-81dc-4978-84e8-a746e9a79ccd/ovnkube-controller/1.log" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.206632 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerStarted","Data":"426526fda28c0cc148c7320abdc7ec3f06c335f12e7adc4d80f28f4b8fb6fda1"} Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.207365 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.230754 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.249714 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.266438 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.279580 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.279627 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.279645 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.279671 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.279690 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:15Z","lastTransitionTime":"2026-01-28T12:22:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.288554 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6
977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 
12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.309349 4685 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.328575 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.344856 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:15Z 
is after 2025-08-24T17:21:41Z" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.373482 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.382910 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.382957 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.382966 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.382984 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.382993 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:15Z","lastTransitionTime":"2026-01-28T12:22:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.390541 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.410768 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://426526fda28c0cc148c7320abdc7ec3f06c335f12e7adc4d80f28f4b8fb6fda1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:21:58Z\\\",\\\"message\\\":\\\"_uuid == {ba175bbe-5cc4-47e6-a32d-57693e1320bd}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.280794 6218 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.280851 6218 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.279481 6218 services_controller.go:360] Finished syncing service console on namespace openshift-console for network=default : 5.55701ms\\\\nI0128 12:21:58.280949 6218 services_controller.go:356] Processing sync for service openshift-authentication-operator/metrics for network=default\\\\nF0128 12:21:58.280973 6218 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:22:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.427146 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee77451-6566-4b66-9de7-cd679ec96556\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd096013bc79dbef2af62d85f0bde6bb4227ef5f1e4b19e8d54f46c56aa8e28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0dbc5b6b55278dfd36841f37e2e6eaaff502fc0d660a0c1ad1e31a2d1c0305ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0dbc5b6b55278dfd36841f37e2e6eaaff502fc0d660a0c1ad1e31a2d1c0305ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.450706 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d357ec17-9216-429a-95b6-f1b12fe0315b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d39b1a67b3e35f25fd74df829ea3e77dbf70af50eec20396256657b7d7843f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d6ee7dca5cc5555e11d6e2550d13cd6386cbf84f722d6783cb9bb108ad60401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae76144067f7839771549d4c80cd4b908d3d2e64b73655c5ee92749af7a3a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.467192 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.487432 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.492731 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.492797 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.492823 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.492854 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.492877 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:15Z","lastTransitionTime":"2026-01-28T12:22:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.507720 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.522334 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:15 crc 
kubenswrapper[4685]: I0128 12:22:15.540034 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aab7e5498efc3a955171be7c657be8188a9338e34ae0a2315fa7372efbb5f71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v
4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"
cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.545147 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:22:15 crc kubenswrapper[4685]: E0128 12:22:15.545325 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.545429 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:22:15 crc kubenswrapper[4685]: E0128 12:22:15.545520 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.545607 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:22:15 crc kubenswrapper[4685]: E0128 12:22:15.545703 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.545777 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:22:15 crc kubenswrapper[4685]: E0128 12:22:15.545858 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.550998 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 22:06:43.643411167 +0000 UTC Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.561312 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mou
ntPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:15Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.595931 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.595987 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.596004 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.596029 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.596046 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:15Z","lastTransitionTime":"2026-01-28T12:22:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.711337 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.711387 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.711406 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.711432 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.711452 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:15Z","lastTransitionTime":"2026-01-28T12:22:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.814670 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.814734 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.814754 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.814783 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.814804 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:15Z","lastTransitionTime":"2026-01-28T12:22:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.918122 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.918224 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.918248 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.918276 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:15 crc kubenswrapper[4685]: I0128 12:22:15.918294 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:15Z","lastTransitionTime":"2026-01-28T12:22:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.023364 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.023435 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.023460 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.023487 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.023504 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:16Z","lastTransitionTime":"2026-01-28T12:22:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.127945 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.127998 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.128018 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.128043 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.128061 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:16Z","lastTransitionTime":"2026-01-28T12:22:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.231530 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.231590 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.231608 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.231633 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.231650 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:16Z","lastTransitionTime":"2026-01-28T12:22:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.335036 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.335080 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.335094 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.335115 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.335131 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:16Z","lastTransitionTime":"2026-01-28T12:22:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.437387 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.437413 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.437421 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.437434 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.437443 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:16Z","lastTransitionTime":"2026-01-28T12:22:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.540270 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.540310 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.540319 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.540335 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.540344 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:16Z","lastTransitionTime":"2026-01-28T12:22:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.551113 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 14:06:18.932035959 +0000 UTC
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.643899 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.643973 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.643990 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.644014 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.644033 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:16Z","lastTransitionTime":"2026-01-28T12:22:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.747627 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.747686 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.747699 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.747718 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.747731 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:16Z","lastTransitionTime":"2026-01-28T12:22:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.883828 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.883885 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.883899 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.883921 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.883934 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:16Z","lastTransitionTime":"2026-01-28T12:22:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.987007 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.987071 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.987089 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.987114 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:16 crc kubenswrapper[4685]: I0128 12:22:16.987131 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:16Z","lastTransitionTime":"2026-01-28T12:22:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.089998 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.090059 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.090075 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.090098 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.090115 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:17Z","lastTransitionTime":"2026-01-28T12:22:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.193031 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.193138 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.193161 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.193214 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.193233 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:17Z","lastTransitionTime":"2026-01-28T12:22:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.217403 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-b85rl_6fd64f35-81dc-4978-84e8-a746e9a79ccd/ovnkube-controller/2.log"
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.218925 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-b85rl_6fd64f35-81dc-4978-84e8-a746e9a79ccd/ovnkube-controller/1.log"
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.223288 4685 generic.go:334] "Generic (PLEG): container finished" podID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerID="426526fda28c0cc148c7320abdc7ec3f06c335f12e7adc4d80f28f4b8fb6fda1" exitCode=1
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.223349 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerDied","Data":"426526fda28c0cc148c7320abdc7ec3f06c335f12e7adc4d80f28f4b8fb6fda1"}
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.223410 4685 scope.go:117] "RemoveContainer" containerID="84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437"
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.224549 4685 scope.go:117] "RemoveContainer" containerID="426526fda28c0cc148c7320abdc7ec3f06c335f12e7adc4d80f28f4b8fb6fda1"
Jan 28 12:22:17 crc kubenswrapper[4685]: E0128 12:22:17.224823 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-b85rl_openshift-ovn-kubernetes(6fd64f35-81dc-4978-84e8-a746e9a79ccd)\"" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd"
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.244126 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee77451-6566-4b66-9de7-cd679ec96556\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd096013bc79dbef2af62d85f0bde6bb4227ef5f1e4b19e8d54f46c56aa8e28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0dbc5b6b55278dfd36841f37e2e6eaaff502fc0d660a0c1ad1e31a2d1c0305ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0dbc5b6b55278dfd36841f37e2e6eaaff502fc0d660a0c1ad1e31a2d1c0305ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.264050 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d357ec17-9216-429a-95b6-f1b12fe0315b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d39b1a67b3e35f25fd74df829ea3e77dbf70af50eec20396256657b7d7843f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d6ee7dca5cc5555e11d6e2550d13cd6386cbf84f722d6783cb9bb108ad60401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae76144067f7839771549d4c80cd4b908d3d2e64b73655c5ee92749af7a3a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.284139 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.297869 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.297945 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.297966 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.297992 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.298017 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:17Z","lastTransitionTime":"2026-01-28T12:22:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.309956 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.332554 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:17Z is after 2025-08-24T17:21:41Z" Jan 28 
12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.350667 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.375328 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aab7e5498efc3a955171be7c657be8188a9338e34ae0a2315fa7372efbb5f71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.395449 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.400510 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.400565 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.400582 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.400609 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.400627 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:17Z","lastTransitionTime":"2026-01-28T12:22:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.418525 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.438419 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.459874 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6
355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 
12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.482286 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:17Z is after 2025-08-24T17:21:41Z"
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.503633 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.503694 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.503718 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.503747 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.503770 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:17Z","lastTransitionTime":"2026-01-28T12:22:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.530599 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.544875 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.544935 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:22:17 crc kubenswrapper[4685]: E0128 12:22:17.545022 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.544877 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp"
Jan 28 12:22:17 crc kubenswrapper[4685]: E0128 12:22:17.545098 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.544934 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:22:17 crc kubenswrapper[4685]: E0128 12:22:17.545136 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef"
Jan 28 12:22:17 crc kubenswrapper[4685]: E0128 12:22:17.545261 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.549589 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.551781 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 00:17:21.067815831 +0000 UTC Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.566626 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.579506 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.606436 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.606487 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.606505 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.606532 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.606553 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:17Z","lastTransitionTime":"2026-01-28T12:22:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.618791 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://426526fda28c0cc148c7320abdc7ec3f06c335f12e7adc4d80f28f4b8fb6fda1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:21:58Z\\\",\\\"message\\\":\\\"_uuid == {ba175bbe-5cc4-47e6-a32d-57693e1320bd}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.280794 6218 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.280851 6218 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.279481 6218 services_controller.go:360] Finished syncing service console on namespace openshift-console for network=default : 5.55701ms\\\\nI0128 12:21:58.280949 6218 services_controller.go:356] Processing sync for service openshift-authentication-operator/metrics for network=default\\\\nF0128 12:21:58.280973 6218 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426526fda28c0cc148c7320abdc7ec3f06c335f12e7adc4d80f28f4b8fb6fda1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:22:17Z\\\",\\\"message\\\":\\\"r-operator]} name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:22:16.885279 6509 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-api/cluster-autoscaler-operator]} name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0128 12:22:16.885295 6509 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:22:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.633850 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.16
8.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:17Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.710452 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.710496 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.710512 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.710536 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.710552 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:17Z","lastTransitionTime":"2026-01-28T12:22:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.830453 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.830531 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.830556 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.830583 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.830605 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:17Z","lastTransitionTime":"2026-01-28T12:22:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.933480 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.933559 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.933578 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.933605 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:17 crc kubenswrapper[4685]: I0128 12:22:17.933625 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:17Z","lastTransitionTime":"2026-01-28T12:22:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.036585 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.036656 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.036675 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.036699 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.036717 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:18Z","lastTransitionTime":"2026-01-28T12:22:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.139921 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.139971 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.139991 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.140015 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.140033 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:18Z","lastTransitionTime":"2026-01-28T12:22:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.230427 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-b85rl_6fd64f35-81dc-4978-84e8-a746e9a79ccd/ovnkube-controller/2.log" Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.243101 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.243159 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.243236 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.243260 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.243278 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:18Z","lastTransitionTime":"2026-01-28T12:22:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.346846 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.346923 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.346939 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.346963 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.346979 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:18Z","lastTransitionTime":"2026-01-28T12:22:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.450054 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.450116 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.450133 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.450159 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.450250 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:18Z","lastTransitionTime":"2026-01-28T12:22:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.551928 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 15:56:07.994666758 +0000 UTC
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.552536 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.552594 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.552613 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.552637 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.552655 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:18Z","lastTransitionTime":"2026-01-28T12:22:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.655830 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.655933 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.655986 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.656012 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.656030 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:18Z","lastTransitionTime":"2026-01-28T12:22:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.759982 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.760054 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.760081 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.760145 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.760205 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:18Z","lastTransitionTime":"2026-01-28T12:22:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.863371 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.863468 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.863487 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.863512 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.863530 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:18Z","lastTransitionTime":"2026-01-28T12:22:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.967122 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.967234 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.967259 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.967290 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:18 crc kubenswrapper[4685]: I0128 12:22:18.967309 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:18Z","lastTransitionTime":"2026-01-28T12:22:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.070717 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.070808 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.070831 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.070865 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.070896 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:19Z","lastTransitionTime":"2026-01-28T12:22:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.173789 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.173838 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.173851 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.173870 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.173883 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:19Z","lastTransitionTime":"2026-01-28T12:22:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.277148 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.277220 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.277231 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.277250 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.277261 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:19Z","lastTransitionTime":"2026-01-28T12:22:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.380414 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.380481 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.380503 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.380532 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.380552 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:19Z","lastTransitionTime":"2026-01-28T12:22:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.483605 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.483672 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.483696 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.483725 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.483746 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:19Z","lastTransitionTime":"2026-01-28T12:22:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.545481 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.545552 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.545573 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.545635 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:22:19 crc kubenswrapper[4685]: E0128 12:22:19.545715 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 12:22:19 crc kubenswrapper[4685]: E0128 12:22:19.545844 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 12:22:19 crc kubenswrapper[4685]: E0128 12:22:19.545920 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef"
Jan 28 12:22:19 crc kubenswrapper[4685]: E0128 12:22:19.545859 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.552469 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 12:54:07.523761005 +0000 UTC Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.587117 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.587407 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.587638 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.587799 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.587928 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:19Z","lastTransitionTime":"2026-01-28T12:22:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.690696 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.690761 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.690781 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.690808 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.690829 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:19Z","lastTransitionTime":"2026-01-28T12:22:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.793119 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.793213 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.793234 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.793262 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.793284 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:19Z","lastTransitionTime":"2026-01-28T12:22:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.896970 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.897046 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.897069 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.897108 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:19 crc kubenswrapper[4685]: I0128 12:22:19.897129 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:19Z","lastTransitionTime":"2026-01-28T12:22:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.000280 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.000427 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.000453 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.000482 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.000504 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:20Z","lastTransitionTime":"2026-01-28T12:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.103592 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.103668 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.103691 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.103721 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.103742 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:20Z","lastTransitionTime":"2026-01-28T12:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.207404 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.207453 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.207465 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.207483 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.207497 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:20Z","lastTransitionTime":"2026-01-28T12:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.310305 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.310375 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.310390 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.310406 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.310418 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:20Z","lastTransitionTime":"2026-01-28T12:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.413447 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.413487 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.413497 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.413513 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.413524 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:20Z","lastTransitionTime":"2026-01-28T12:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.516762 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.516824 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.516841 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.516868 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.516885 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:20Z","lastTransitionTime":"2026-01-28T12:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.553683 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 01:39:57.116987645 +0000 UTC Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.562090 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.581362 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.600221 4685 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d4560
14dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.613522 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.619204 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.619255 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.619268 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.619288 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.619301 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:20Z","lastTransitionTime":"2026-01-28T12:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.631351 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.643354 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.657670 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.667334 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.684936 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://426526fda28c0cc148c7320abdc7ec3f06c335f12e7adc4d80f28f4b8fb6fda1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:21:58Z\\\",\\\"message\\\":\\\"_uuid == {ba175bbe-5cc4-47e6-a32d-57693e1320bd}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.280794 6218 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.280851 6218 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.279481 6218 services_controller.go:360] Finished syncing service console on namespace openshift-console for network=default : 5.55701ms\\\\nI0128 12:21:58.280949 6218 services_controller.go:356] Processing sync for service openshift-authentication-operator/metrics for network=default\\\\nF0128 12:21:58.280973 6218 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426526fda28c0cc148c7320abdc7ec3f06c335f12e7adc4d80f28f4b8fb6fda1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:22:17Z\\\",\\\"message\\\":\\\"r-operator]} name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:22:16.885279 6509 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-api/cluster-autoscaler-operator]} 
name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0128 12:22:16.885295 6509 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:22:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\
\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.696049 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.706340 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee77451-6566-4b66-9de7-cd679ec96556\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd096013bc79dbef2af62d85f0bde6bb4227ef5f1e4b19e8d54f46c56aa8e28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0dbc5b6b55278dfd36841f37e2e6eaaff502fc0d660a0c1ad1e31a2d1c0305ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0dbc5b6b55278dfd36841f37e2e6eaaff502fc0d660a0c1ad1e31a2d1c0305ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.717161 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d357ec17-9216-429a-95b6-f1b12fe0315b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d39b1a67b3e35f25fd74df829ea3e77dbf70af50eec20396256657b7d7843f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d6ee7dca5cc5555e11d6e2550d13cd6386cbf84f722d6783cb9bb108ad60401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae76144067f7839771549d4c80cd4b908d3d2e64b73655c5ee92749af7a3a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.722840 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.722891 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.722905 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.722924 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.722937 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:20Z","lastTransitionTime":"2026-01-28T12:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.730418 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.743403 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.757660 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\
\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.768096 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.788333 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aab7e5498efc3a955171be7c657be8188a9338e34ae0a2315fa7372efbb5f71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.802865 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.825228 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.825269 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.825284 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.825306 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.825321 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:20Z","lastTransitionTime":"2026-01-28T12:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.830279 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.830353 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.830377 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.830410 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.830521 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:20Z","lastTransitionTime":"2026-01-28T12:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:20 crc kubenswrapper[4685]: E0128 12:22:20.847247 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.851926 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.851982 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.852002 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.852028 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.852046 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:20Z","lastTransitionTime":"2026-01-28T12:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:20 crc kubenswrapper[4685]: E0128 12:22:20.872020 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.877009 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.877070 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.877093 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.877122 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.877144 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:20Z","lastTransitionTime":"2026-01-28T12:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:20 crc kubenswrapper[4685]: E0128 12:22:20.897688 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.902468 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.902524 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.902541 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.902568 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.902585 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:20Z","lastTransitionTime":"2026-01-28T12:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:20 crc kubenswrapper[4685]: E0128 12:22:20.923794 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.928675 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.928735 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.928751 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.928774 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.928795 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:20Z","lastTransitionTime":"2026-01-28T12:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:20 crc kubenswrapper[4685]: E0128 12:22:20.950061 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:20Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:20 crc kubenswrapper[4685]: E0128 12:22:20.950355 4685 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.952924 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
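Every failed status patch in this trace ends in the same webhook error: the serving certificate for node.network-node-identity.openshift.io at https://127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, while the node clock reads 2026-01-28T12:22:20Z. The kubelet retried the identical patch five times, matching its nodeStatusUpdateRetry limit of 5, before logging "update node status exceeds retry count" above. A minimal Go sketch for confirming the expiry from the node itself; the endpoint and port are taken from the error text, and nothing here is part of the log or of kubelet:

    // certcheck.go: print the validity window of the certificate served at
    // the webhook endpoint named in the kubelet errors above.
    package main

    import (
        "crypto/tls"
        "fmt"
        "log"
        "time"
    )

    func main() {
        // InsecureSkipVerify lets the handshake complete even though the
        // certificate is expired, so it can still be inspected.
        conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
        if err != nil {
            log.Fatalf("dial webhook endpoint: %v", err)
        }
        defer conn.Close()

        for _, cert := range conn.ConnectionState().PeerCertificates {
            fmt.Printf("subject=%q notBefore=%s notAfter=%s\n",
                cert.Subject.CommonName,
                cert.NotBefore.UTC().Format(time.RFC3339),
                cert.NotAfter.UTC().Format(time.RFC3339))
        }
    }

Against this node it should print notAfter=2025-08-24T17:21:41Z, matching the x509 message embedded in each "Error updating node status" entry.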
event="NodeHasSufficientMemory" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.952971 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.952987 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.953012 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:20 crc kubenswrapper[4685]: I0128 12:22:20.953030 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:20Z","lastTransitionTime":"2026-01-28T12:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.056352 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.056407 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.056424 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.056446 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.056464 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:21Z","lastTransitionTime":"2026-01-28T12:22:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.159547 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.159621 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.159637 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.159669 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.159690 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:21Z","lastTransitionTime":"2026-01-28T12:22:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.262556 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.262623 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.262687 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.262719 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.262745 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:21Z","lastTransitionTime":"2026-01-28T12:22:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.365795 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.365869 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.365889 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.365913 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.365930 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:21Z","lastTransitionTime":"2026-01-28T12:22:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.468540 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.468631 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.468651 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.468711 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.468730 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:21Z","lastTransitionTime":"2026-01-28T12:22:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.545734 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.545800 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.545851 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp"
Jan 28 12:22:21 crc kubenswrapper[4685]: E0128 12:22:21.545883 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.545909 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:22:21 crc kubenswrapper[4685]: E0128 12:22:21.546076 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef"
Jan 28 12:22:21 crc kubenswrapper[4685]: E0128 12:22:21.546101 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 12:22:21 crc kubenswrapper[4685]: E0128 12:22:21.546152 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.553917 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 18:54:29.774235705 +0000 UTC
Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.571452 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.571558 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.571578 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.571609 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.571628 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:21Z","lastTransitionTime":"2026-01-28T12:22:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.674388 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.674438 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.674451 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.674484 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.674498 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:21Z","lastTransitionTime":"2026-01-28T12:22:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.781027 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.781125 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.781139 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.781161 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.781197 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:21Z","lastTransitionTime":"2026-01-28T12:22:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.885865 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.885915 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.885932 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.885958 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.885976 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:21Z","lastTransitionTime":"2026-01-28T12:22:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.989953 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.990002 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.990014 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.990036 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:21 crc kubenswrapper[4685]: I0128 12:22:21.990048 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:21Z","lastTransitionTime":"2026-01-28T12:22:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.093671 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.093772 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.093789 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.093817 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.093835 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:22Z","lastTransitionTime":"2026-01-28T12:22:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.197283 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.197391 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.197413 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.197448 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.197472 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:22Z","lastTransitionTime":"2026-01-28T12:22:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.301324 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.301388 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.301406 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.301432 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.301457 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:22Z","lastTransitionTime":"2026-01-28T12:22:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.404194 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.404250 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.404262 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.404284 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.404326 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:22Z","lastTransitionTime":"2026-01-28T12:22:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.508429 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.508537 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.508549 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.508570 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.508584 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:22Z","lastTransitionTime":"2026-01-28T12:22:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.554851 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 18:49:10.073892037 +0000 UTC Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.611735 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.611798 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.611816 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.611840 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.611858 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:22Z","lastTransitionTime":"2026-01-28T12:22:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.714902 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.714949 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.714969 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.714994 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.715012 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:22Z","lastTransitionTime":"2026-01-28T12:22:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.817771 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.817828 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.817848 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.817874 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.817892 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:22Z","lastTransitionTime":"2026-01-28T12:22:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.920625 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.920671 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.920687 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.920712 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:22 crc kubenswrapper[4685]: I0128 12:22:22.920733 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:22Z","lastTransitionTime":"2026-01-28T12:22:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.025058 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.025115 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.025137 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.025167 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.025222 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:23Z","lastTransitionTime":"2026-01-28T12:22:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.128986 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.129068 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.129096 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.129132 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.129155 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:23Z","lastTransitionTime":"2026-01-28T12:22:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.233630 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.233691 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.233701 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.233722 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.233734 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:23Z","lastTransitionTime":"2026-01-28T12:22:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.336981 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.337057 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.337074 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.337093 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.337129 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:23Z","lastTransitionTime":"2026-01-28T12:22:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.440407 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.440477 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.440492 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.440519 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.440536 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:23Z","lastTransitionTime":"2026-01-28T12:22:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.543881 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.543927 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.543939 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.543958 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.543972 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:23Z","lastTransitionTime":"2026-01-28T12:22:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.544817 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp"
Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.544817 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.544884 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.544944 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:22:23 crc kubenswrapper[4685]: E0128 12:22:23.545057 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef"
Jan 28 12:22:23 crc kubenswrapper[4685]: E0128 12:22:23.545252 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 12:22:23 crc kubenswrapper[4685]: E0128 12:22:23.545366 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 12:22:23 crc kubenswrapper[4685]: E0128 12:22:23.545443 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.555668 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 14:11:05.1920238 +0000 UTC
Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.647407 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.647470 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.647486 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.647522 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.647541 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:23Z","lastTransitionTime":"2026-01-28T12:22:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.750745 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.750849 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.750870 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.750900 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.750917 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:23Z","lastTransitionTime":"2026-01-28T12:22:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.854218 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.854277 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.854289 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.854308 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.854318 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:23Z","lastTransitionTime":"2026-01-28T12:22:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.961815 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.961874 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.961893 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.961920 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:23 crc kubenswrapper[4685]: I0128 12:22:23.961938 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:23Z","lastTransitionTime":"2026-01-28T12:22:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.285153 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.285225 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.285237 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.285254 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.285263 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:24Z","lastTransitionTime":"2026-01-28T12:22:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.387479 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.387509 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.387517 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.387530 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.387540 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:24Z","lastTransitionTime":"2026-01-28T12:22:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.490348 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.490407 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.490423 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.490449 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.490467 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:24Z","lastTransitionTime":"2026-01-28T12:22:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.555838 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 21:27:06.696951676 +0000 UTC Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.593884 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.593961 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.593979 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.594008 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.594027 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:24Z","lastTransitionTime":"2026-01-28T12:22:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.697725 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.697804 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.697822 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.697861 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.697881 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:24Z","lastTransitionTime":"2026-01-28T12:22:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.801318 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.801364 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.801377 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.801397 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.801411 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:24Z","lastTransitionTime":"2026-01-28T12:22:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.903962 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.904025 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.904042 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.904070 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:24 crc kubenswrapper[4685]: I0128 12:22:24.904089 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:24Z","lastTransitionTime":"2026-01-28T12:22:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.006922 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.006967 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.006976 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.006993 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.007005 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:25Z","lastTransitionTime":"2026-01-28T12:22:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.110305 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.110358 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.110369 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.110384 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.110395 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:25Z","lastTransitionTime":"2026-01-28T12:22:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.213666 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.213715 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.213731 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.213750 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.213765 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:25Z","lastTransitionTime":"2026-01-28T12:22:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.301096 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rrnv6_28aac5d8-57ac-4302-ab17-c07f33fcaffd/kube-multus/0.log"
Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.301237 4685 generic.go:334] "Generic (PLEG): container finished" podID="28aac5d8-57ac-4302-ab17-c07f33fcaffd" containerID="b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8" exitCode=1
Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.301286 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rrnv6" event={"ID":"28aac5d8-57ac-4302-ab17-c07f33fcaffd","Type":"ContainerDied","Data":"b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8"}
Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.301815 4685 scope.go:117] "RemoveContainer" containerID="b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8"
Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.315899 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.315935 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.315944 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.315960 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.315972 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:25Z","lastTransitionTime":"2026-01-28T12:22:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.326612 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aab7e5498efc3a955171be7c657be8188a9338e34ae0a2315fa7372efbb5f71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.344427 4685 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:22:24Z\\\",\\\"message\\\":\\\"2026-01-28T12:21:37+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c5883195-3cab-4765-b8ff-62a0b5801c57\\\\n2026-01-28T12:21:37+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c5883195-3cab-4765-b8ff-62a0b5801c57 to /host/opt/cni/bin/\\\\n2026-01-28T12:21:39Z [verbose] multus-daemon started\\\\n2026-01-28T12:21:39Z [verbose] Readiness Indicator file check\\\\n2026-01-28T12:22:24Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.361007 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.376552 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.391636 4685 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.404944 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.418957 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.419478 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.419507 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.419515 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.419529 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.419540 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:25Z","lastTransitionTime":"2026-01-28T12:22:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.430831 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.462779 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://426526fda28c0cc148c7320abdc7ec3f06c335f12e7adc4d80f28f4b8fb6fda1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:21:58Z\\\",\\\"message\\\":\\\"_uuid == {ba175bbe-5cc4-47e6-a32d-57693e1320bd}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.280794 6218 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.280851 6218 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.279481 6218 services_controller.go:360] Finished syncing service console on namespace openshift-console for network=default : 5.55701ms\\\\nI0128 12:21:58.280949 6218 services_controller.go:356] Processing sync for service openshift-authentication-operator/metrics for network=default\\\\nF0128 12:21:58.280973 6218 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426526fda28c0cc148c7320abdc7ec3f06c335f12e7adc4d80f28f4b8fb6fda1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:22:17Z\\\",\\\"message\\\":\\\"r-operator]} 
name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:22:16.885279 6509 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-api/cluster-autoscaler-operator]} name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0128 12:22:16.885295 6509 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:22:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\
\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.473008 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.484440 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.496642 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.508231 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d357ec17-9216-429a-95b6-f1b12fe0315b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d39b1a67b3e35f25fd74df829ea3e77dbf70af50eec20396256657b7d7843f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d6ee7dca5cc5555e11d6e2550d13cd6386cbf84f722d6783cb9bb108ad60401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae76144067f7839771549d4c80cd4b908d3d2e64b73655c5ee92749af7a3a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.521036 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.521986 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.522019 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.522028 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.522045 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.522054 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:25Z","lastTransitionTime":"2026-01-28T12:22:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.534791 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.545309 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.545359 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:22:25 crc kubenswrapper[4685]: E0128 12:22:25.545434 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:22:25 crc kubenswrapper[4685]: E0128 12:22:25.545539 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.545682 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.545719 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:22:25 crc kubenswrapper[4685]: E0128 12:22:25.545786 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:22:25 crc kubenswrapper[4685]: E0128 12:22:25.546052 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.547964 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.556617 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 21:27:53.793917678 +0000 UTC Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.558414 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod 
\"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.568484 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee77451-6566-4b66-9de7-cd679ec96556\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd096013bc79dbef2af62d85f0bde6bb4227ef5f1e4b19e8d54f46c56aa8e28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0dbc5b6b55278dfd36841f37e2e6eaaff502fc0d660a0c1ad1e31a2d1c0305ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0dbc5b6b55278dfd36841f37e2e6eaaff502fc0d660a0c1ad1e31a2d1c0305ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:25Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.624551 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.624608 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.624622 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.624642 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.624656 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:25Z","lastTransitionTime":"2026-01-28T12:22:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.728600 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.728654 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.728668 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.728698 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.728714 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:25Z","lastTransitionTime":"2026-01-28T12:22:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.831762 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.831826 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.831843 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.831869 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.831886 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:25Z","lastTransitionTime":"2026-01-28T12:22:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.935254 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.935323 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.935336 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.935357 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:25 crc kubenswrapper[4685]: I0128 12:22:25.935369 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:25Z","lastTransitionTime":"2026-01-28T12:22:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.038125 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.038212 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.038228 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.038251 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.038266 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:26Z","lastTransitionTime":"2026-01-28T12:22:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.141465 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.141522 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.141543 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.141569 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.141587 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:26Z","lastTransitionTime":"2026-01-28T12:22:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.244077 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.244109 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.244120 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.244137 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.244147 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:26Z","lastTransitionTime":"2026-01-28T12:22:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.310563 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rrnv6_28aac5d8-57ac-4302-ab17-c07f33fcaffd/kube-multus/0.log" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.310704 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rrnv6" event={"ID":"28aac5d8-57ac-4302-ab17-c07f33fcaffd","Type":"ContainerStarted","Data":"ecaf6ac86fff546861ecdfe2860a2f6c859ee43807bd8a8384c9567315300893"} Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.333585 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aab7e5498efc3a955171be7c657be8188a9338e34ae0a2315fa7372efbb5f71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-
binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7a
a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for 
pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:26Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.349249 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.349295 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.349310 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.349333 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.349349 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:26Z","lastTransitionTime":"2026-01-28T12:22:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.360548 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ecaf6ac86fff546861ecdfe2860a2f6c859ee43807bd8a8384c9567315300893\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:22:24Z\\\",\\\"message\\\":\\\"2026-01-28T12:21:37+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c5883195-3cab-4765-b8ff-62a0b5801c57\\\\n2026-01-28T12:21:37+00:00 [cnibincopy] Successfully 
moved files in /host/opt/cni/bin/upgrade_c5883195-3cab-4765-b8ff-62a0b5801c57 to /host/opt/cni/bin/\\\\n2026-01-28T12:21:39Z [verbose] multus-daemon started\\\\n2026-01-28T12:21:39Z [verbose] Readiness Indicator file check\\\\n2026-01-28T12:22:24Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:22:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:26Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.380544 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:26Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.398502 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:26Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.414601 4685 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:26Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.429261 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:26Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.450900 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://426526fda28c0cc148c7320abdc7ec3f06c335f12e7adc4d80f28f4b8fb6fda1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:21:58Z\\\",\\\"message\\\":\\\"_uuid == {ba175bbe-5cc4-47e6-a32d-57693e1320bd}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.280794 6218 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.280851 6218 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.279481 6218 services_controller.go:360] Finished syncing service console on namespace openshift-console for network=default : 5.55701ms\\\\nI0128 12:21:58.280949 6218 services_controller.go:356] Processing sync for service openshift-authentication-operator/metrics for network=default\\\\nF0128 12:21:58.280973 6218 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426526fda28c0cc148c7320abdc7ec3f06c335f12e7adc4d80f28f4b8fb6fda1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:22:17Z\\\",\\\"message\\\":\\\"r-operator]} name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:22:16.885279 6509 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-api/cluster-autoscaler-operator]} 
name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0128 12:22:16.885295 6509 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:22:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\
\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:26Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.452880 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.452928 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.452960 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.452979 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.452993 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:26Z","lastTransitionTime":"2026-01-28T12:22:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.466847 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:26Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.485609 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:26Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.503014 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:26Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.520260 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:26Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.538224 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:26Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.555676 4685 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.555714 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:26Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.555746 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.555893 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.555922 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.555936 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:26Z","lastTransitionTime":"2026-01-28T12:22:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.556754 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 21:58:19.28742098 +0000 UTC Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.570779 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"po
dIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:26Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.589644 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:26Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.611187 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee77451-6566-4b66-9de7-cd679ec96556\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd096013bc79dbef2af62d85f0bde6bb4227ef5f1e4b19e8d54f46c56aa8e28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0dbc5b6b55278dfd36841f37e2e6eaaff502fc0d660a0c1ad1e31a2d1c0305ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0dbc5b6b55278dfd36841f37e2e6eaaff502fc0d660a0c1ad1e31a2d1c0305ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-28T12:22:26Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.629867 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d357ec17-9216-429a-95b6-f1b12fe0315b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d39b1a67b3e35f25fd74df829ea3e77dbf70af50eec20396256657b7d7843f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d6ee7dca5cc5555e11d6e2550d13cd6386cbf84f722d6783cb9bb108ad60401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae76144067f7839771549d4c80cd4b908d3d2e64b73655c5ee92749af7a3a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\
\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:26Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.647029 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:26Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.659243 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.659280 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.659292 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.659308 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.659320 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:26Z","lastTransitionTime":"2026-01-28T12:22:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.761815 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.761860 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.761869 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.761884 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.761893 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:26Z","lastTransitionTime":"2026-01-28T12:22:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.864950 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.865018 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.865036 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.865062 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.865079 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:26Z","lastTransitionTime":"2026-01-28T12:22:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.968330 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.968422 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.968447 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.968486 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:26 crc kubenswrapper[4685]: I0128 12:22:26.968511 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:26Z","lastTransitionTime":"2026-01-28T12:22:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.071257 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.071318 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.071343 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.071375 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.071399 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:27Z","lastTransitionTime":"2026-01-28T12:22:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.174827 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.174890 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.174909 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.174935 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.174953 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:27Z","lastTransitionTime":"2026-01-28T12:22:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.278127 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.278260 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.278278 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.278303 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.278330 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:27Z","lastTransitionTime":"2026-01-28T12:22:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.388563 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.388637 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.388655 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.388679 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.388697 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:27Z","lastTransitionTime":"2026-01-28T12:22:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.423269 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.423414 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:22:27 crc kubenswrapper[4685]: E0128 12:22:27.423482 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:31.42345195 +0000 UTC m=+162.510865825 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.423558 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.423597 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.423647 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:22:27 crc kubenswrapper[4685]: E0128 12:22:27.423669 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:22:27 crc kubenswrapper[4685]: E0128 12:22:27.423706 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:22:27 crc kubenswrapper[4685]: E0128 
12:22:27.423731 4685 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:22:27 crc kubenswrapper[4685]: E0128 12:22:27.423766 4685 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:22:27 crc kubenswrapper[4685]: E0128 12:22:27.423814 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:23:31.4238007 +0000 UTC m=+162.511214575 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 12:22:27 crc kubenswrapper[4685]: E0128 12:22:27.423810 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 12:22:27 crc kubenswrapper[4685]: E0128 12:22:27.423836 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 12:23:31.42382603 +0000 UTC m=+162.511239895 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:22:27 crc kubenswrapper[4685]: E0128 12:22:27.423860 4685 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 12:22:27 crc kubenswrapper[4685]: E0128 12:22:27.423885 4685 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:22:27 crc kubenswrapper[4685]: E0128 12:22:27.423972 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 12:23:31.423946914 +0000 UTC m=+162.511360789 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 12:22:27 crc kubenswrapper[4685]: E0128 12:22:27.424095 4685 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:22:27 crc kubenswrapper[4685]: E0128 12:22:27.424439 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 12:23:31.424314354 +0000 UTC m=+162.511728249 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.491939 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.492011 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.492033 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.492065 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.492085 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:27Z","lastTransitionTime":"2026-01-28T12:22:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.544862 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.544926 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.544977 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.544872 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:22:27 crc kubenswrapper[4685]: E0128 12:22:27.545147 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:22:27 crc kubenswrapper[4685]: E0128 12:22:27.545325 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:22:27 crc kubenswrapper[4685]: E0128 12:22:27.545458 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:22:27 crc kubenswrapper[4685]: E0128 12:22:27.545593 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.557335 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 17:31:34.025870526 +0000 UTC Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.595867 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.595938 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.595961 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.595995 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.596020 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:27Z","lastTransitionTime":"2026-01-28T12:22:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.699587 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.699652 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.699669 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.699700 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.699720 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:27Z","lastTransitionTime":"2026-01-28T12:22:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.802585 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.802645 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.802664 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.802689 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.802711 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:27Z","lastTransitionTime":"2026-01-28T12:22:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.905125 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.905244 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.905271 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.905304 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:27 crc kubenswrapper[4685]: I0128 12:22:27.905329 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:27Z","lastTransitionTime":"2026-01-28T12:22:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.007828 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.007879 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.007894 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.007917 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.007935 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:28Z","lastTransitionTime":"2026-01-28T12:22:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.110663 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.110709 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.110718 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.110736 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.110748 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:28Z","lastTransitionTime":"2026-01-28T12:22:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.213686 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.213747 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.213759 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.213779 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.213791 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:28Z","lastTransitionTime":"2026-01-28T12:22:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.316428 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.316495 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.316508 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.316529 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.316544 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:28Z","lastTransitionTime":"2026-01-28T12:22:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.420693 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.420770 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.420794 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.420825 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.420848 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:28Z","lastTransitionTime":"2026-01-28T12:22:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.523507 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.523606 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.523637 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.523668 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.523691 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:28Z","lastTransitionTime":"2026-01-28T12:22:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.557974 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 06:48:16.843510235 +0000 UTC Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.626336 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.626417 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.626436 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.626462 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.626476 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:28Z","lastTransitionTime":"2026-01-28T12:22:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.730952 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.730992 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.731004 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.731021 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.731032 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:28Z","lastTransitionTime":"2026-01-28T12:22:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.833716 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.833784 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.833811 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.833839 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.833861 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:28Z","lastTransitionTime":"2026-01-28T12:22:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.937128 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.937268 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.937296 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.937329 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:28 crc kubenswrapper[4685]: I0128 12:22:28.937353 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:28Z","lastTransitionTime":"2026-01-28T12:22:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.039945 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.040022 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.040047 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.040077 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.040096 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:29Z","lastTransitionTime":"2026-01-28T12:22:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.143260 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.143325 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.143343 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.143371 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.143388 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:29Z","lastTransitionTime":"2026-01-28T12:22:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.246932 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.247004 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.247022 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.247047 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.247064 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:29Z","lastTransitionTime":"2026-01-28T12:22:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.350439 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.350513 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.350543 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.350574 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.350598 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:29Z","lastTransitionTime":"2026-01-28T12:22:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.454593 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.454674 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.454699 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.454736 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.454760 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:29Z","lastTransitionTime":"2026-01-28T12:22:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.545530 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.545616 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.545695 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.545551 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:22:29 crc kubenswrapper[4685]: E0128 12:22:29.545807 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:22:29 crc kubenswrapper[4685]: E0128 12:22:29.545933 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:22:29 crc kubenswrapper[4685]: E0128 12:22:29.546041 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:22:29 crc kubenswrapper[4685]: E0128 12:22:29.546223 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.557615 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.557654 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.557672 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.557697 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.557716 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:29Z","lastTransitionTime":"2026-01-28T12:22:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.558301 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 18:11:52.521136683 +0000 UTC Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.661798 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.661861 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.661895 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.662094 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.662126 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:29Z","lastTransitionTime":"2026-01-28T12:22:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.766473 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.766524 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.766540 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.766565 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.766584 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:29Z","lastTransitionTime":"2026-01-28T12:22:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.871302 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.871394 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.871404 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.871427 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.871439 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:29Z","lastTransitionTime":"2026-01-28T12:22:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.974240 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.974334 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.974362 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.974397 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:29 crc kubenswrapper[4685]: I0128 12:22:29.974421 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:29Z","lastTransitionTime":"2026-01-28T12:22:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.077924 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.078004 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.078029 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.078054 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.078080 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:30Z","lastTransitionTime":"2026-01-28T12:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.182214 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.182298 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.182321 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.182355 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.182379 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:30Z","lastTransitionTime":"2026-01-28T12:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.284982 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.285034 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.285045 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.285063 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.285075 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:30Z","lastTransitionTime":"2026-01-28T12:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
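
The condition={...} payload repeated by setters.go above is a plain JSON object whose fields are all visible in the log. Decoding it with a local stand-in struct (field names taken from the log; this is not the upstream k8s.io/api NodeCondition type) shows the shape the kubelet is publishing:

// condition.go - decode the Ready condition payload from the log.
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// local stand-in; json tags match the payload in the setters.go entries
type NodeCondition struct {
	Type               string    `json:"type"`
	Status             string    `json:"status"`
	LastHeartbeatTime  time.Time `json:"lastHeartbeatTime"`
	LastTransitionTime time.Time `json:"lastTransitionTime"`
	Reason             string    `json:"reason"`
	Message            string    `json:"message"`
}

func main() {
	raw := `{"type":"Ready","status":"False",
	  "lastHeartbeatTime":"2026-01-28T12:22:29Z",
	  "lastTransitionTime":"2026-01-28T12:22:29Z",
	  "reason":"KubeletNotReady",
	  "message":"container runtime network not ready: NetworkReady=false"}`
	var c NodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s since %s (%s)\n", c.Type, c.Status, c.LastTransitionTime, c.Reason)
}
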
Has your network provider started?"} Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.387749 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.387820 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.387840 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.387867 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.387888 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:30Z","lastTransitionTime":"2026-01-28T12:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.491382 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.491848 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.492049 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.492362 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.492615 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:30Z","lastTransitionTime":"2026-01-28T12:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
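
Runs like the heartbeat block above repeat the same handful of messages roughly every 100 ms, which makes raw reading slow. A small triage sketch that tallies the first quoted message of each klog-style line collapses them into counts; the kubelet.log path and the quoting convention are assumptions for illustration:

// logtriage.go - tally repeated structured-log messages in a kubelet log.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

func main() {
	f, err := os.Open("kubelet.log") // path assumed
	if err != nil {
		panic(err)
	}
	defer f.Close()

	msg := regexp.MustCompile(`"([^"]+)"`) // first quoted field = structured log message
	counts := map[string]int{}
	sc := bufio.NewScanner(f)
	// status-patch entries in this log far exceed Scanner's 64 KiB default
	sc.Buffer(make([]byte, 1024*1024), 16*1024*1024)
	for sc.Scan() {
		if m := msg.FindStringSubmatch(sc.Text()); m != nil {
			counts[m[1]]++
		}
	}
	for m, n := range counts {
		fmt.Printf("%6d  %s\n", n, m)
	}
}
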
Has your network provider started?"} Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.558848 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 20:59:04.681898329 +0000 UTC Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.567380 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:30Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.584689 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:30Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.595639 4685 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.595705 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.595728 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.595753 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.595770 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:30Z","lastTransitionTime":"2026-01-28T12:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.604999 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:30Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.622192 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:30Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.641364 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:30Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.661097 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:30Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.681409 4685 status_manager.go:875] "Failed to update status for pod" 
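
Each of the status_manager failures above ends in the same root cause: the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 serves a certificate that expired 2025-08-24, months before the logged time. A stdlib-only diagnostic sketch (not an OpenShift tool) that dials the endpoint and reports the served certificate's validity window confirms such an error independently of the kubelet:

// webhookcertcheck.go - probe the failing webhook endpoint from the log
// and print the served certificate's validity window.
package main

import (
	"crypto/tls"
	"fmt"
	"os"
	"time"
)

func main() {
	addr := "127.0.0.1:9743" // endpoint from the failed webhook POSTs above
	// skip verification on purpose: we want to inspect the bad cert, not trust it
	conn, err := tls.Dial("tcp", addr, &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Fprintln(os.Stderr, "dial:", err)
		os.Exit(1)
	}
	defer conn.Close()
	certs := conn.ConnectionState().PeerCertificates
	if len(certs) == 0 {
		fmt.Fprintln(os.Stderr, "no peer certificates presented")
		os.Exit(1)
	}
	cert := certs[0]
	now := time.Now().UTC()
	fmt.Printf("subject=%s notBefore=%s notAfter=%s expired=%v\n",
		cert.Subject, cert.NotBefore.UTC(), cert.NotAfter.UTC(),
		now.After(cert.NotAfter))
}
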
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:30Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.692727 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:30Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.697810 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.697858 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.697870 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.697885 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.697897 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:30Z","lastTransitionTime":"2026-01-28T12:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.714603 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://426526fda28c0cc148c7320abdc7ec3f06c335f12e7adc4d80f28f4b8fb6fda1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84f54b424ac0e95384e1b9f78d021b85629fe3429495d95ff52c7349d7c87437\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:21:58Z\\\",\\\"message\\\":\\\"_uuid == {ba175bbe-5cc4-47e6-a32d-57693e1320bd}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.280794 6218 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.280851 6218 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:21:58.279481 6218 services_controller.go:360] Finished syncing service console on namespace openshift-console for network=default : 5.55701ms\\\\nI0128 12:21:58.280949 6218 services_controller.go:356] Processing sync for service openshift-authentication-operator/metrics for network=default\\\\nF0128 12:21:58.280973 6218 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426526fda28c0cc148c7320abdc7ec3f06c335f12e7adc4d80f28f4b8fb6fda1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:22:17Z\\\",\\\"message\\\":\\\"r-operator]} name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:22:16.885279 6509 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-api/cluster-autoscaler-operator]} name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0128 12:22:16.885295 6509 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:22:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:30Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.727953 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.16
8.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:30Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.743517 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee77451-6566-4b66-9de7-cd679ec96556\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd096013bc79dbef2af62d85f0bde6bb4227ef5f1e4b19e8d54f46c56aa8e28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0dbc5b6b55278dfd36841f37e2e6eaaff502fc0d660a0c1ad1e31a2d1c0305ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0dbc5b6b55278dfd36841f37e2e6eaaff502fc0d660a0c1ad1e31a2d1c0305ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\"
,\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:30Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.758825 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d357ec17-9216-429a-95b6-f1b12fe0315b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d39b1a67b3e35f25fd74df829ea3e77dbf70af50eec20396256657b7d7843f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d6ee7dca5cc5555e11d6e2550d13cd6386cbf84f722d6783cb9bb108ad60401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae76144067f7839771549d4c80cd4b908d3d2e64b73655c5ee92749af7a3a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd
789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:30Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.779451 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:30Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.798349 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:30Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.800927 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.801003 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.801027 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.801059 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.801086 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:30Z","lastTransitionTime":"2026-01-28T12:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.815803 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:30Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.830394 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:30Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:30 crc 
kubenswrapper[4685]: I0128 12:22:30.848692 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aab7e5498efc3a955171be7c657be8188a9338e34ae0a2315fa7372efbb5f71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v
4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"
cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:30Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.866237 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rrnv6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ecaf6ac86fff546861ecdfe2860a2f6c859ee43807bd8a8384c9567315300893\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:22:24Z\\\",\\\"message\\\":\\\"2026-01-28T12:21:37+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c5883195-3cab-4765-b8ff-62a0b5801c57\\\\n2026-01-28T12:21:37+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c5883195-3cab-4765-b8ff-62a0b5801c57 to /host/opt/cni/bin/\\\\n2026-01-28T12:21:39Z [verbose] multus-daemon started\\\\n2026-01-28T12:21:39Z [verbose] Readiness Indicator file check\\\\n2026-01-28T12:22:24Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:22:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:30Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.904442 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.904505 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.904526 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.904555 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:30 crc kubenswrapper[4685]: I0128 12:22:30.904606 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:30Z","lastTransitionTime":"2026-01-28T12:22:30Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.007558 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.007644 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.007669 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.007703 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.007729 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:31Z","lastTransitionTime":"2026-01-28T12:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.110282 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.110355 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.110380 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.110409 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.110429 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:31Z","lastTransitionTime":"2026-01-28T12:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.213970 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.214034 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.214050 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.214074 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.214092 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:31Z","lastTransitionTime":"2026-01-28T12:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.306523 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.306833 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.306967 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.307108 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.307288 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:31Z","lastTransitionTime":"2026-01-28T12:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 28 12:22:31 crc kubenswrapper[4685]: E0128 12:22:31.331364 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:31Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.337258 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.337302 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.337319 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.337342 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.337360 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:31Z","lastTransitionTime":"2026-01-28T12:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:31 crc kubenswrapper[4685]: E0128 12:22:31.360902 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:31Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.365956 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.366018 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.366037 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.366062 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.366084 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:31Z","lastTransitionTime":"2026-01-28T12:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:31 crc kubenswrapper[4685]: E0128 12:22:31.384909 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:31Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.389819 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.389866 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.389879 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.389898 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.389910 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:31Z","lastTransitionTime":"2026-01-28T12:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:31 crc kubenswrapper[4685]: E0128 12:22:31.404275 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:31Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.408684 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.408726 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.408739 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.408761 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.408777 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:31Z","lastTransitionTime":"2026-01-28T12:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:31 crc kubenswrapper[4685]: E0128 12:22:31.426769 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"1c1fbee8-908e-4ee5-ba57-4efd9bd9c0cd\\\",\\\"systemUUID\\\":\\\"b11ccd4a-5d5c-4d26-9d13-26d2c695f32b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:31Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:31 crc kubenswrapper[4685]: E0128 12:22:31.426928 4685 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.429152 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
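All five attempts fail for the same root cause: the API server must consult the node.network-node-identity.openshift.io validating webhook before admitting the PATCH, and that webhook's serving certificate expired on 2025-08-24 while the node clock reads 2026-01-28, which is typical of a CRC VM resumed long after its certificates were minted. A quick way to confirm the expiry against the endpoint named in the log, sketched below; InsecureSkipVerify is deliberate so the handshake completes and the certificate dates can be read rather than trusted:

package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	// Webhook endpoint taken from the failing Post URL in the log.
	addr := "127.0.0.1:9743"
	conn, err := tls.Dial("tcp", addr, &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	state := conn.ConnectionState()
	if len(state.PeerCertificates) == 0 {
		fmt.Println("no peer certificate presented")
		return
	}
	cert := state.PeerCertificates[0]
	fmt.Printf("notBefore=%s notAfter=%s\n",
		cert.NotBefore.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	if now := time.Now(); now.After(cert.NotAfter) {
		// The condition behind "x509: certificate has expired or is not
		// yet valid" in the kubelet errors above.
		fmt.Printf("expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	}
}

The same dates can be read from a shell with: openssl s_client -connect 127.0.0.1:9743 </dev/null 2>/dev/null | openssl x509 -noout -dates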
event="NodeHasSufficientMemory" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.429263 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.429280 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.429305 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.429324 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:31Z","lastTransitionTime":"2026-01-28T12:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.532880 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.532935 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.532948 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.532970 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.532985 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:31Z","lastTransitionTime":"2026-01-28T12:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.545826 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.545870 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.545819 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:22:31 crc kubenswrapper[4685]: E0128 12:22:31.545990 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.545983 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:22:31 crc kubenswrapper[4685]: E0128 12:22:31.546144 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:22:31 crc kubenswrapper[4685]: E0128 12:22:31.546407 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:22:31 crc kubenswrapper[4685]: E0128 12:22:31.546496 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.559040 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 05:51:16.390529931 +0000 UTC Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.635607 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.635667 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.635682 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.635702 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.635716 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:31Z","lastTransitionTime":"2026-01-28T12:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.738754 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.738787 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.738795 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.738809 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.738819 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:31Z","lastTransitionTime":"2026-01-28T12:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.842018 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.842084 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.842104 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.842129 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.842148 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:31Z","lastTransitionTime":"2026-01-28T12:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.945241 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.945299 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.945315 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.945342 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:31 crc kubenswrapper[4685]: I0128 12:22:31.945360 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:31Z","lastTransitionTime":"2026-01-28T12:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.047843 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.047880 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.047888 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.047902 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.047911 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:32Z","lastTransitionTime":"2026-01-28T12:22:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.150985 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.151054 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.151071 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.151096 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.151113 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:32Z","lastTransitionTime":"2026-01-28T12:22:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.254857 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.254957 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.254975 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.255004 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.255024 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:32Z","lastTransitionTime":"2026-01-28T12:22:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.358164 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.358317 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.358337 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.358363 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.358389 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:32Z","lastTransitionTime":"2026-01-28T12:22:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.461454 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.461531 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.461556 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.461588 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.461612 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:32Z","lastTransitionTime":"2026-01-28T12:22:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.545498 4685 scope.go:117] "RemoveContainer" containerID="426526fda28c0cc148c7320abdc7ec3f06c335f12e7adc4d80f28f4b8fb6fda1" Jan 28 12:22:32 crc kubenswrapper[4685]: E0128 12:22:32.545799 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-b85rl_openshift-ovn-kubernetes(6fd64f35-81dc-4978-84e8-a746e9a79ccd)\"" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.560000 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 12:43:18.510896315 +0000 UTC Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.564220 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.564298 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.564321 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.564352 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.564378 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:32Z","lastTransitionTime":"2026-01-28T12:22:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.578026 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a28aa0f-04d6-471c-95f2-ef2268a29b62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aab7e5498efc3a955171be7c657be8188a9338e34ae0a2315fa7372efbb5f71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f57526d93cd9128355c5d3ac44a4673d598c461d4a56703cb323bfd81f58fa80\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fae6ebb1868dc521f89a673507aa91ac747031f380661bda15f8014828f9500\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bfa5be30c64339ecb7fa8abdac0b600a0bde7a13274f44479fcc9fe0086da8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c74c526a4de1d7369a5b18a3fc9a99f08d36ff74a98e1b23007d7f30f4a076e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec7935a9a0d6084e36bc1bc23af6dc80c9ee81b52329ac79689da440421414f7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0fd0838b6c98117c42428ab95294f6f28cfbad28c5259015ac8c4d0673b671d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fsvg6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkpgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.604443 4685 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-rrnv6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28aac5d8-57ac-4302-ab17-c07f33fcaffd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:22:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ecaf6ac86fff546861ecdfe2860a2f6c859ee43807bd8a8384c9567315300893\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:22:24Z\\\",\\\"message\\\":\\\"2026-01-28T12:21:37+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c5883195-3cab-4765-b8ff-62a0b5801c57\\\\n2026-01-28T12:21:37+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c5883195-3cab-4765-b8ff-62a0b5801c57 to /host/opt/cni/bin/\\\\n2026-01-28T12:21:39Z [verbose] multus-daemon started\\\\n2026-01-28T12:21:39Z [verbose] Readiness Indicator file check\\\\n2026-01-28T12:22:24Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:22:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpxs4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rrnv6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.623978 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.646430 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d41535238cf25b5c2140c68673708276e7aea5870d3d9e10f3eef483e7107d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdlnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h5wpv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.667123 4685 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-ml94r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"11d63fa9-8fe9-435e-87ce-e804aadd7def\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0561edc13018f2cdbca6368287748a3834a16dd7b9c4b100f83bceb3e8b4f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-86vvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ml94r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.667420 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.667510 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.667554 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.667577 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.667593 4685 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:32Z","lastTransitionTime":"2026-01-28T12:22:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.690881 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\
":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 12:21:17.310581 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 12:21:17.311475 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3919464288/tls.crt::/tmp/serving-cert-3919464288/tls.key\\\\\\\"\\\\nI0128 12:21:22.729458 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 12:21:22.825151 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 12:21:22.825270 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 12:21:22.825306 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 12:21:22.825314 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 12:21:22.840969 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0128 12:21:22.840985 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0128 12:21:22.841001 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841033 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 12:21:22.841038 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 12:21:22.841042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 12:21:22.841046 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 12:21:22.841049 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0128 12:21:22.843316 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.715266 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e273a2a8-cc34-44bd-b203-39b8b215f5f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c550bd50df659ec4dc0c9b0b17877fd7d74690893bad84e371e10fb8d299030\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cca49fb255332de46ff7f3fdd4a12790e81d2c5bc6e60764321d6da245867fe0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88f79a63bff913e23e2d64c9f456ed2cb31358723defab9c3b4bcefe07054937\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.732641 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://937856c91821d6132c649246d542c456fc46deba69757241d3761a478c99b912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.749028 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aaf168f02ced46b4ab46081b956e77ca5722818ca779f296c012a3a6948c5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.763970 4685 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d83fcf675c663d7497035ee94ccef64e4b824af1eeb8880ccc760445b7503a99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.770535 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.770586 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.770606 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.770632 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.770650 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:32Z","lastTransitionTime":"2026-01-28T12:22:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.779034 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-bpbjn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31830369-ada9-4ed3-8265-15051d6315f4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c023419cf00457eca6f5282f11686bdec35259b540908b27be5c5822e683ea92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6pqm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-bpbjn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.797951 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fd64f35-81dc-4978-84e8-a746e9a79ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:25Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://426526fda28c0cc148c7320abdc7ec3f06c335f12e7adc4d80f28f4b8fb6fda1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426526fda28c0cc148c7320abdc7ec3f06c335f12e7adc4d80f28f4b8fb6fda1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T12:22:17Z\\\",\\\"message\\\":\\\"r-operator]} name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 12:22:16.885279 6509 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-api/cluster-autoscaler-operator]} name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0128 12:22:16.885295 6509 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T12:22:14Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-b85rl_openshift-ovn-kubernetes(6fd64f35-81dc-4978-84e8-a746e9a79ccd)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-862jf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-b85rl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.816006 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee77451-6566-4b66-9de7-cd679ec96556\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdd096013bc79dbef2af62d85f0bde6bb4227ef5f1e4b19e8d54f46c56aa8e28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0dbc5b6b55278dfd36841f37e2e6eaaff502fc0d660a0c
1ad1e31a2d1c0305ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0dbc5b6b55278dfd36841f37e2e6eaaff502fc0d660a0c1ad1e31a2d1c0305ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.829911 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d357ec17-9216-429a-95b6-f1b12fe0315b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:20:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d39b1a67b3e35f25fd74df829ea3e77dbf70af50eec20396256657b7d7843f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d6ee7dca5cc5555e11d6e2550d13cd6386cbf84f722d6783cb9bb108ad60401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dae76144067f7839771549d4c80cd4b908d3d2e64b73655c5ee92749af7a3a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a02b0552124e4a3417fdfe1b38511904cf1fe354ea81169aad3699086fe805\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T12:21:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T12:21:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:20:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.859673 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers 
with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.872832 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.874038 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.874081 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.874097 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.874117 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.874135 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:32Z","lastTransitionTime":"2026-01-28T12:22:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.886312 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94fa291c-6b2a-4a3b-b70d-def6dd28589b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://452ade6d423750ef7171a63f08cd01fbb9df8ac9364338c63a9ce2f890854c26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc6b041ce8cea568890255015c84072a96d1d62f62e371b60974fe8a7b54500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pk4p2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-m96cf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.897547 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T12:21:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ls7x7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T12:21:39Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5x4kp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T12:22:32Z is after 2025-08-24T17:21:41Z" Jan 28 12:22:32 crc 
kubenswrapper[4685]: I0128 12:22:32.977254 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.977345 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.977370 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.977404 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:32 crc kubenswrapper[4685]: I0128 12:22:32.977427 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:32Z","lastTransitionTime":"2026-01-28T12:22:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.080873 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.080921 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.080936 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.080961 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.080978 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:33Z","lastTransitionTime":"2026-01-28T12:22:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.184524 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.184586 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.184621 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.184670 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.184693 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:33Z","lastTransitionTime":"2026-01-28T12:22:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.287435 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.287505 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.287521 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.287548 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.287565 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:33Z","lastTransitionTime":"2026-01-28T12:22:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.390563 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.390628 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.390645 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.390669 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.390691 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:33Z","lastTransitionTime":"2026-01-28T12:22:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.495221 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.495300 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.495324 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.495355 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.495377 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:33Z","lastTransitionTime":"2026-01-28T12:22:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.545042 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.545116 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.545216 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.545318 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:22:33 crc kubenswrapper[4685]: E0128 12:22:33.545336 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:22:33 crc kubenswrapper[4685]: E0128 12:22:33.545466 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:22:33 crc kubenswrapper[4685]: E0128 12:22:33.545661 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:22:33 crc kubenswrapper[4685]: E0128 12:22:33.545794 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.560774 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 13:42:20.588889367 +0000 UTC Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.598754 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.598805 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.598827 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.598856 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.598879 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:33Z","lastTransitionTime":"2026-01-28T12:22:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.701848 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.701922 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.701940 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.701968 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.701989 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:33Z","lastTransitionTime":"2026-01-28T12:22:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.805750 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.805822 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.805843 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.805868 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.805886 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:33Z","lastTransitionTime":"2026-01-28T12:22:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.909229 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.909298 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.909326 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.909356 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:33 crc kubenswrapper[4685]: I0128 12:22:33.909378 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:33Z","lastTransitionTime":"2026-01-28T12:22:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.022065 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.022139 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.022158 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.022223 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.022247 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:34Z","lastTransitionTime":"2026-01-28T12:22:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.125214 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.125246 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.125254 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.125268 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.125277 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:34Z","lastTransitionTime":"2026-01-28T12:22:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.228091 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.228120 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.228128 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.228142 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.228151 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:34Z","lastTransitionTime":"2026-01-28T12:22:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.331343 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.331423 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.331450 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.331482 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.331502 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:34Z","lastTransitionTime":"2026-01-28T12:22:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.434254 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.434319 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.434333 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.434366 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.434385 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:34Z","lastTransitionTime":"2026-01-28T12:22:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.537444 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.537502 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.537865 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.537895 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.537909 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:34Z","lastTransitionTime":"2026-01-28T12:22:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.562237 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 22:22:38.978791342 +0000 UTC Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.640788 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.640832 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.640842 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.640858 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.640867 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:34Z","lastTransitionTime":"2026-01-28T12:22:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.743800 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.743894 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.743911 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.743931 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.743942 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:34Z","lastTransitionTime":"2026-01-28T12:22:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.845892 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.845973 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.845992 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.846014 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.846030 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:34Z","lastTransitionTime":"2026-01-28T12:22:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.949861 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.949935 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.949955 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.949991 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:34 crc kubenswrapper[4685]: I0128 12:22:34.950015 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:34Z","lastTransitionTime":"2026-01-28T12:22:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.053289 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.053361 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.053378 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.053407 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.053426 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:35Z","lastTransitionTime":"2026-01-28T12:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.156666 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.157143 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.157161 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.157268 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.157291 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:35Z","lastTransitionTime":"2026-01-28T12:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.261040 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.261107 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.261123 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.261148 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.261167 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:35Z","lastTransitionTime":"2026-01-28T12:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.364243 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.364319 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.364339 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.364372 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.364395 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:35Z","lastTransitionTime":"2026-01-28T12:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.468063 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.468112 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.468129 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.468153 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.468207 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:35Z","lastTransitionTime":"2026-01-28T12:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.545029 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.545104 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:22:35 crc kubenswrapper[4685]: E0128 12:22:35.545257 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.545043 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.545530 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:22:35 crc kubenswrapper[4685]: E0128 12:22:35.545655 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:22:35 crc kubenswrapper[4685]: E0128 12:22:35.546033 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:22:35 crc kubenswrapper[4685]: E0128 12:22:35.546234 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.562674 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 09:47:05.180914714 +0000 UTC Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.571233 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.571309 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.571331 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.571357 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.571380 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:35Z","lastTransitionTime":"2026-01-28T12:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.674498 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.674571 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.674594 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.674627 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.674663 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:35Z","lastTransitionTime":"2026-01-28T12:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.778353 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.778391 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.778400 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.778418 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.778429 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:35Z","lastTransitionTime":"2026-01-28T12:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.882063 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.882132 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.882154 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.882217 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.882242 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:35Z","lastTransitionTime":"2026-01-28T12:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.984911 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.984964 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.984984 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.985017 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:35 crc kubenswrapper[4685]: I0128 12:22:35.985039 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:35Z","lastTransitionTime":"2026-01-28T12:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.088459 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.088528 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.088545 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.088572 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.088593 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:36Z","lastTransitionTime":"2026-01-28T12:22:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.192673 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.192728 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.192747 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.192772 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.192790 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:36Z","lastTransitionTime":"2026-01-28T12:22:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.296134 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.296225 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.296246 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.296313 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.296335 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:36Z","lastTransitionTime":"2026-01-28T12:22:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.398498 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.398531 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.398539 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.398553 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.398560 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:36Z","lastTransitionTime":"2026-01-28T12:22:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.501430 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.501530 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.501550 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.501572 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.501589 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:36Z","lastTransitionTime":"2026-01-28T12:22:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.563392 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 05:20:52.983544872 +0000 UTC Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.603789 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.603857 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.603876 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.603904 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.603924 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:36Z","lastTransitionTime":"2026-01-28T12:22:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.706242 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.706292 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.706306 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.706326 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.706339 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:36Z","lastTransitionTime":"2026-01-28T12:22:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.809211 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.809258 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.809269 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.809289 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.809302 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:36Z","lastTransitionTime":"2026-01-28T12:22:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.912130 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.912189 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.912199 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.912215 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:36 crc kubenswrapper[4685]: I0128 12:22:36.912227 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:36Z","lastTransitionTime":"2026-01-28T12:22:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.015813 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.015890 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.015911 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.015937 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.015957 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:37Z","lastTransitionTime":"2026-01-28T12:22:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.118513 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.118581 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.118599 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.118625 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.118644 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:37Z","lastTransitionTime":"2026-01-28T12:22:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.221659 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.221707 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.221739 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.221771 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.221794 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:37Z","lastTransitionTime":"2026-01-28T12:22:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.324787 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.324855 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.324872 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.324898 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.324929 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:37Z","lastTransitionTime":"2026-01-28T12:22:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.428769 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.428832 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.428850 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.428877 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.428898 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:37Z","lastTransitionTime":"2026-01-28T12:22:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.532751 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.532807 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.532825 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.532850 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.532870 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:37Z","lastTransitionTime":"2026-01-28T12:22:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.545472 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:22:37 crc kubenswrapper[4685]: E0128 12:22:37.545643 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.545736 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:22:37 crc kubenswrapper[4685]: E0128 12:22:37.545825 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.545906 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:22:37 crc kubenswrapper[4685]: E0128 12:22:37.545993 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.546062 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:22:37 crc kubenswrapper[4685]: E0128 12:22:37.546137 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.564000 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 16:27:11.516049235 +0000 UTC Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.636503 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.636570 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.636588 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.636615 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.636633 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:37Z","lastTransitionTime":"2026-01-28T12:22:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
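
Annotation: every failure in this stretch is one root cause reported from three vantage points: setters.go keeps the node's Ready condition False, util.go cannot find or create pod sandboxes, and pod_workers.go skips syncing the multus, network-diagnostics, and network-console pods, all because the runtime finds no CNI configuration file in /etc/kubernetes/cni/net.d/. On this cluster the network provider is expected to write that file once it starts; until then these records repeat on every sync. The Go sketch below mirrors the check implied by the error message; it is a hypothetical standalone probe, not CRI-O's or the kubelet's code, and the extension list is an assumption borrowed from common CNI config loaders.

    // cni_conf_probe.go - hypothetical probe for the condition reported above.
    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    func main() {
    	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log's error
    	var found []string
    	// Assumed extension set; common CNI loaders accept .conf, .conflist, .json.
    	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
    		matches, _ := filepath.Glob(filepath.Join(confDir, pat))
    		found = append(found, matches...)
    	}
    	if len(found) == 0 {
    		// Same condition the kubelet keeps relaying from the runtime.
    		fmt.Fprintf(os.Stderr, "no CNI configuration file in %s\n", confDir)
    		os.Exit(1)
    	}
    	fmt.Println("CNI config present:", found)
    }

Once the network provider drops a config into that directory, the NetworkReady condition should flip and the sandbox-creation retries above should begin to succeed.
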
Has your network provider started?"} Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.739593 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.739700 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.739723 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.739752 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.739774 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:37Z","lastTransitionTime":"2026-01-28T12:22:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.842814 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.842901 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.842926 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.842962 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.842985 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:37Z","lastTransitionTime":"2026-01-28T12:22:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.946666 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.946740 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.946759 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.946789 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:37 crc kubenswrapper[4685]: I0128 12:22:37.946808 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:37Z","lastTransitionTime":"2026-01-28T12:22:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.050085 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.050160 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.050205 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.050231 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.050252 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:38Z","lastTransitionTime":"2026-01-28T12:22:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.153704 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.153812 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.153837 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.153864 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.153884 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:38Z","lastTransitionTime":"2026-01-28T12:22:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.256951 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.257019 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.257029 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.257069 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.257083 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:38Z","lastTransitionTime":"2026-01-28T12:22:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.359068 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.359120 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.359137 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.359161 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.359207 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:38Z","lastTransitionTime":"2026-01-28T12:22:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.462738 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.462781 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.462794 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.462815 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.462833 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:38Z","lastTransitionTime":"2026-01-28T12:22:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.564215 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 01:47:38.487743583 +0000 UTC Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.565532 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.565589 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.565600 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.565623 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.565637 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:38Z","lastTransitionTime":"2026-01-28T12:22:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.669084 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.669159 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.669220 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.669266 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.669293 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:38Z","lastTransitionTime":"2026-01-28T12:22:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.772274 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.772316 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.772328 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.772346 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.772359 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:38Z","lastTransitionTime":"2026-01-28T12:22:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.874645 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.874690 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.874698 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.874714 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.874723 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:38Z","lastTransitionTime":"2026-01-28T12:22:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.978662 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.978726 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.978745 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.978772 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:38 crc kubenswrapper[4685]: I0128 12:22:38.978790 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:38Z","lastTransitionTime":"2026-01-28T12:22:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.082822 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.082886 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.082928 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.082964 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.082992 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:39Z","lastTransitionTime":"2026-01-28T12:22:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.186784 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.186865 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.186888 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.186917 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.186935 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:39Z","lastTransitionTime":"2026-01-28T12:22:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.290653 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.290749 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.290770 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.290796 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.290814 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:39Z","lastTransitionTime":"2026-01-28T12:22:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.393757 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.393823 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.393837 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.393860 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.393875 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:39Z","lastTransitionTime":"2026-01-28T12:22:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.495923 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.495986 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.496004 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.496034 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.496053 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:39Z","lastTransitionTime":"2026-01-28T12:22:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.545632 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.545699 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.545769 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:22:39 crc kubenswrapper[4685]: E0128 12:22:39.545850 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.545867 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:22:39 crc kubenswrapper[4685]: E0128 12:22:39.546051 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:22:39 crc kubenswrapper[4685]: E0128 12:22:39.546509 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:22:39 crc kubenswrapper[4685]: E0128 12:22:39.546428 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.564779 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 07:31:51.707989805 +0000 UTC Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.598783 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.598844 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.598861 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.598901 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.598921 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:39Z","lastTransitionTime":"2026-01-28T12:22:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.701622 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.701676 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.701686 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.701705 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.701719 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:39Z","lastTransitionTime":"2026-01-28T12:22:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.804845 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.804921 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.804937 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.804962 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.804979 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:39Z","lastTransitionTime":"2026-01-28T12:22:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.908183 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.908225 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.908260 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.908280 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:39 crc kubenswrapper[4685]: I0128 12:22:39.908293 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:39Z","lastTransitionTime":"2026-01-28T12:22:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.010501 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.010534 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.010542 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.010556 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.010566 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:40Z","lastTransitionTime":"2026-01-28T12:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.114014 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.114077 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.114087 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.114107 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.114123 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:40Z","lastTransitionTime":"2026-01-28T12:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.216684 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.216731 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.216744 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.216764 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.216777 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:40Z","lastTransitionTime":"2026-01-28T12:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.320094 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.320156 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.320187 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.320206 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.320221 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:40Z","lastTransitionTime":"2026-01-28T12:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.422900 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.422938 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.422947 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.422963 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.422973 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:40Z","lastTransitionTime":"2026-01-28T12:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.526236 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.526284 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.526300 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.526317 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.526328 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:40Z","lastTransitionTime":"2026-01-28T12:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.565299 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 18:36:37.247604552 +0000 UTC Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.613941 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-ml94r" podStartSLOduration=77.613914972 podStartE2EDuration="1m17.613914972s" podCreationTimestamp="2026-01-28 12:21:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:22:40.613740617 +0000 UTC m=+111.701154492" watchObservedRunningTime="2026-01-28 12:22:40.613914972 +0000 UTC m=+111.701328827" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.628590 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.628636 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.628651 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.628669 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.628682 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:40Z","lastTransitionTime":"2026-01-28T12:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.648054 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=77.648035299 podStartE2EDuration="1m17.648035299s" podCreationTimestamp="2026-01-28 12:21:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:22:40.631551656 +0000 UTC m=+111.718965491" watchObservedRunningTime="2026-01-28 12:22:40.648035299 +0000 UTC m=+111.735449144" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.648261 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=77.648256025 podStartE2EDuration="1m17.648256025s" podCreationTimestamp="2026-01-28 12:21:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:22:40.647908796 +0000 UTC m=+111.735322691" watchObservedRunningTime="2026-01-28 12:22:40.648256025 +0000 UTC m=+111.735669880" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.724381 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-bpbjn" podStartSLOduration=77.724362741 podStartE2EDuration="1m17.724362741s" podCreationTimestamp="2026-01-28 12:21:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:22:40.708509175 +0000 UTC m=+111.795923050" watchObservedRunningTime="2026-01-28 12:22:40.724362741 +0000 UTC m=+111.811776576" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.730705 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.730749 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.730760 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.730777 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.730789 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:40Z","lastTransitionTime":"2026-01-28T12:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.735709 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=35.735695896 podStartE2EDuration="35.735695896s" podCreationTimestamp="2026-01-28 12:22:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:22:40.735080039 +0000 UTC m=+111.822493884" watchObservedRunningTime="2026-01-28 12:22:40.735695896 +0000 UTC m=+111.823109731" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.768631 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=48.768611511 podStartE2EDuration="48.768611511s" podCreationTimestamp="2026-01-28 12:21:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:22:40.751701836 +0000 UTC m=+111.839115671" watchObservedRunningTime="2026-01-28 12:22:40.768611511 +0000 UTC m=+111.856025366" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.800269 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-m96cf" podStartSLOduration=76.800253602 podStartE2EDuration="1m16.800253602s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:22:40.800049836 +0000 UTC m=+111.887463671" watchObservedRunningTime="2026-01-28 12:22:40.800253602 +0000 UTC m=+111.887667437" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.817487 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-vkpgm" podStartSLOduration=76.817468645 podStartE2EDuration="1m16.817468645s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:22:40.817375742 +0000 UTC m=+111.904789597" watchObservedRunningTime="2026-01-28 12:22:40.817468645 +0000 UTC m=+111.904882480" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.833682 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.833725 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.833737 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.833755 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.833765 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:40Z","lastTransitionTime":"2026-01-28T12:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.856578 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-rrnv6" podStartSLOduration=76.856554776 podStartE2EDuration="1m16.856554776s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:22:40.853954996 +0000 UTC m=+111.941368851" watchObservedRunningTime="2026-01-28 12:22:40.856554776 +0000 UTC m=+111.943968621" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.909105 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podStartSLOduration=77.909081278 podStartE2EDuration="1m17.909081278s" podCreationTimestamp="2026-01-28 12:21:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:22:40.908355268 +0000 UTC m=+111.995769103" watchObservedRunningTime="2026-01-28 12:22:40.909081278 +0000 UTC m=+111.996495113" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.935898 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.935943 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.935955 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.935973 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:40 crc kubenswrapper[4685]: I0128 12:22:40.935985 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:40Z","lastTransitionTime":"2026-01-28T12:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.039810 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.039861 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.039878 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.039905 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.039924 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:41Z","lastTransitionTime":"2026-01-28T12:22:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.143147 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.143241 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.143268 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.143302 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.143323 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:41Z","lastTransitionTime":"2026-01-28T12:22:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.246499 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.246566 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.246583 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.246609 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.246626 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:41Z","lastTransitionTime":"2026-01-28T12:22:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.348690 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.348751 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.348771 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.348799 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.348818 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:41Z","lastTransitionTime":"2026-01-28T12:22:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.452647 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.452720 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.452763 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.452795 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.452818 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:41Z","lastTransitionTime":"2026-01-28T12:22:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.545773 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:22:41 crc kubenswrapper[4685]: E0128 12:22:41.545976 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.546339 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:22:41 crc kubenswrapper[4685]: E0128 12:22:41.546434 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.546599 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.546618 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:22:41 crc kubenswrapper[4685]: E0128 12:22:41.546831 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:22:41 crc kubenswrapper[4685]: E0128 12:22:41.547058 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.556199 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.556258 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.556286 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.556315 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.556339 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:41Z","lastTransitionTime":"2026-01-28T12:22:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.565898 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 07:18:09.939856493 +0000 UTC Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.660697 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.660760 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.660778 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.660806 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.660827 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:41Z","lastTransitionTime":"2026-01-28T12:22:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.763823 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.763914 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.763940 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.763970 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.763991 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:41Z","lastTransitionTime":"2026-01-28T12:22:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.806548 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.806609 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.806628 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.806658 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.806681 4685 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T12:22:41Z","lastTransitionTime":"2026-01-28T12:22:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.885156 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph"] Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.885865 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.889774 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.891322 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.891406 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.891477 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.915656 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/eb96d012-bee9-4f59-86e5-5188a8d514f6-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-nl9ph\" (UID: \"eb96d012-bee9-4f59-86e5-5188a8d514f6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.915704 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/eb96d012-bee9-4f59-86e5-5188a8d514f6-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-nl9ph\" (UID: \"eb96d012-bee9-4f59-86e5-5188a8d514f6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.915734 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/eb96d012-bee9-4f59-86e5-5188a8d514f6-service-ca\") pod \"cluster-version-operator-5c965bbfc6-nl9ph\" (UID: \"eb96d012-bee9-4f59-86e5-5188a8d514f6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.915783 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb96d012-bee9-4f59-86e5-5188a8d514f6-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-nl9ph\" (UID: \"eb96d012-bee9-4f59-86e5-5188a8d514f6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph" Jan 28 12:22:41 crc kubenswrapper[4685]: I0128 12:22:41.915851 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb96d012-bee9-4f59-86e5-5188a8d514f6-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-nl9ph\" (UID: \"eb96d012-bee9-4f59-86e5-5188a8d514f6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph" Jan 28 12:22:42 crc kubenswrapper[4685]: I0128 12:22:42.016906 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/eb96d012-bee9-4f59-86e5-5188a8d514f6-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-nl9ph\" (UID: \"eb96d012-bee9-4f59-86e5-5188a8d514f6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph" Jan 28 12:22:42 crc 
kubenswrapper[4685]: I0128 12:22:42.016961 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/eb96d012-bee9-4f59-86e5-5188a8d514f6-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-nl9ph\" (UID: \"eb96d012-bee9-4f59-86e5-5188a8d514f6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph"
Jan 28 12:22:42 crc kubenswrapper[4685]: I0128 12:22:42.016998 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/eb96d012-bee9-4f59-86e5-5188a8d514f6-service-ca\") pod \"cluster-version-operator-5c965bbfc6-nl9ph\" (UID: \"eb96d012-bee9-4f59-86e5-5188a8d514f6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph"
Jan 28 12:22:42 crc kubenswrapper[4685]: I0128 12:22:42.017032 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb96d012-bee9-4f59-86e5-5188a8d514f6-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-nl9ph\" (UID: \"eb96d012-bee9-4f59-86e5-5188a8d514f6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph"
Jan 28 12:22:42 crc kubenswrapper[4685]: I0128 12:22:42.017079 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb96d012-bee9-4f59-86e5-5188a8d514f6-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-nl9ph\" (UID: \"eb96d012-bee9-4f59-86e5-5188a8d514f6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph"
Jan 28 12:22:42 crc kubenswrapper[4685]: I0128 12:22:42.017118 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/eb96d012-bee9-4f59-86e5-5188a8d514f6-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-nl9ph\" (UID: \"eb96d012-bee9-4f59-86e5-5188a8d514f6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph"
Jan 28 12:22:42 crc kubenswrapper[4685]: I0128 12:22:42.018108 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/eb96d012-bee9-4f59-86e5-5188a8d514f6-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-nl9ph\" (UID: \"eb96d012-bee9-4f59-86e5-5188a8d514f6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph"
Jan 28 12:22:42 crc kubenswrapper[4685]: I0128 12:22:42.018892 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/eb96d012-bee9-4f59-86e5-5188a8d514f6-service-ca\") pod \"cluster-version-operator-5c965bbfc6-nl9ph\" (UID: \"eb96d012-bee9-4f59-86e5-5188a8d514f6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph"
Jan 28 12:22:42 crc kubenswrapper[4685]: I0128 12:22:42.029211 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb96d012-bee9-4f59-86e5-5188a8d514f6-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-nl9ph\" (UID: \"eb96d012-bee9-4f59-86e5-5188a8d514f6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph"
Jan 28 12:22:42 crc kubenswrapper[4685]: I0128 12:22:42.044242 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb96d012-bee9-4f59-86e5-5188a8d514f6-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-nl9ph\" (UID: \"eb96d012-bee9-4f59-86e5-5188a8d514f6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph"
Jan 28 12:22:42 crc kubenswrapper[4685]: I0128 12:22:42.213462 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph"
Jan 28 12:22:42 crc kubenswrapper[4685]: W0128 12:22:42.234738 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeb96d012_bee9_4f59_86e5_5188a8d514f6.slice/crio-e3840d3c21ce906118d15aad2935e9e1af9c683e10ca4aa67f48db2ef5c72012 WatchSource:0}: Error finding container e3840d3c21ce906118d15aad2935e9e1af9c683e10ca4aa67f48db2ef5c72012: Status 404 returned error can't find the container with id e3840d3c21ce906118d15aad2935e9e1af9c683e10ca4aa67f48db2ef5c72012
Jan 28 12:22:42 crc kubenswrapper[4685]: I0128 12:22:42.373249 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph" event={"ID":"eb96d012-bee9-4f59-86e5-5188a8d514f6","Type":"ContainerStarted","Data":"e3840d3c21ce906118d15aad2935e9e1af9c683e10ca4aa67f48db2ef5c72012"}
Jan 28 12:22:42 crc kubenswrapper[4685]: I0128 12:22:42.566689 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 00:55:05.550386866 +0000 UTC
Jan 28 12:22:42 crc kubenswrapper[4685]: I0128 12:22:42.566763 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates
Jan 28 12:22:42 crc kubenswrapper[4685]: I0128 12:22:42.577139 4685 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146
Jan 28 12:22:43 crc kubenswrapper[4685]: I0128 12:22:43.231321 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs\") pod \"network-metrics-daemon-5x4kp\" (UID: \"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\") " pod="openshift-multus/network-metrics-daemon-5x4kp"
Jan 28 12:22:43 crc kubenswrapper[4685]: E0128 12:22:43.231660 4685 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 12:22:43 crc kubenswrapper[4685]: E0128 12:22:43.231779 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs podName:5f0d7b7e-1577-4289-9043-ddf8dd9a48ef nodeName:}" failed. No retries permitted until 2026-01-28 12:23:47.231749266 +0000 UTC m=+178.319163131 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs") pod "network-metrics-daemon-5x4kp" (UID: "5f0d7b7e-1577-4289-9043-ddf8dd9a48ef") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 12:22:43 crc kubenswrapper[4685]: I0128 12:22:43.380124 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph" event={"ID":"eb96d012-bee9-4f59-86e5-5188a8d514f6","Type":"ContainerStarted","Data":"0ab1b802741a8a272bc781c37858d2e5e10833310b4270cc631a912fec28a164"}
Jan 28 12:22:43 crc kubenswrapper[4685]: I0128 12:22:43.402046 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nl9ph" podStartSLOduration=80.402020534 podStartE2EDuration="1m20.402020534s" podCreationTimestamp="2026-01-28 12:21:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:22:43.400487532 +0000 UTC m=+114.487901427" watchObservedRunningTime="2026-01-28 12:22:43.402020534 +0000 UTC m=+114.489434409"
Jan 28 12:22:43 crc kubenswrapper[4685]: I0128 12:22:43.544763 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:22:43 crc kubenswrapper[4685]: I0128 12:22:43.544894 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 12:22:43 crc kubenswrapper[4685]: E0128 12:22:43.544990 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 12:22:43 crc kubenswrapper[4685]: I0128 12:22:43.545027 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp"
Jan 28 12:22:43 crc kubenswrapper[4685]: I0128 12:22:43.545130 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:22:43 crc kubenswrapper[4685]: E0128 12:22:43.545255 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef"
Jan 28 12:22:43 crc kubenswrapper[4685]: E0128 12:22:43.545400 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 12:22:43 crc kubenswrapper[4685]: E0128 12:22:43.545438 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 12:22:45 crc kubenswrapper[4685]: I0128 12:22:45.545116 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 12:22:45 crc kubenswrapper[4685]: E0128 12:22:45.545333 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 12:22:45 crc kubenswrapper[4685]: I0128 12:22:45.545395 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:22:45 crc kubenswrapper[4685]: I0128 12:22:45.545451 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp"
Jan 28 12:22:45 crc kubenswrapper[4685]: I0128 12:22:45.545830 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:22:45 crc kubenswrapper[4685]: E0128 12:22:45.546214 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 12:22:45 crc kubenswrapper[4685]: E0128 12:22:45.546324 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 12:22:45 crc kubenswrapper[4685]: E0128 12:22:45.546423 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef"
pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:22:45 crc kubenswrapper[4685]: I0128 12:22:45.546482 4685 scope.go:117] "RemoveContainer" containerID="426526fda28c0cc148c7320abdc7ec3f06c335f12e7adc4d80f28f4b8fb6fda1" Jan 28 12:22:46 crc kubenswrapper[4685]: I0128 12:22:46.394841 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-b85rl_6fd64f35-81dc-4978-84e8-a746e9a79ccd/ovnkube-controller/2.log" Jan 28 12:22:46 crc kubenswrapper[4685]: I0128 12:22:46.399927 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerStarted","Data":"8b810b25ef2bcbe754ab1930aa8730f24bfaa1065df52b08a31ee753d2a478fb"} Jan 28 12:22:47 crc kubenswrapper[4685]: I0128 12:22:47.403923 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:22:47 crc kubenswrapper[4685]: I0128 12:22:47.458744 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" podStartSLOduration=83.458723983 podStartE2EDuration="1m23.458723983s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:22:47.453716558 +0000 UTC m=+118.541130393" watchObservedRunningTime="2026-01-28 12:22:47.458723983 +0000 UTC m=+118.546137828" Jan 28 12:22:47 crc kubenswrapper[4685]: I0128 12:22:47.546595 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:22:47 crc kubenswrapper[4685]: E0128 12:22:47.546729 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:22:47 crc kubenswrapper[4685]: I0128 12:22:47.546991 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:22:47 crc kubenswrapper[4685]: E0128 12:22:47.547123 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:22:47 crc kubenswrapper[4685]: I0128 12:22:47.547337 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:22:47 crc kubenswrapper[4685]: E0128 12:22:47.547435 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:22:47 crc kubenswrapper[4685]: I0128 12:22:47.547619 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:22:47 crc kubenswrapper[4685]: E0128 12:22:47.547680 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:22:48 crc kubenswrapper[4685]: I0128 12:22:48.309096 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-5x4kp"] Jan 28 12:22:48 crc kubenswrapper[4685]: I0128 12:22:48.407271 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:22:48 crc kubenswrapper[4685]: E0128 12:22:48.407479 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:22:49 crc kubenswrapper[4685]: I0128 12:22:49.545440 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:22:49 crc kubenswrapper[4685]: I0128 12:22:49.545456 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:22:49 crc kubenswrapper[4685]: E0128 12:22:49.546106 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 12:22:49 crc kubenswrapper[4685]: E0128 12:22:49.545927 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 12:22:49 crc kubenswrapper[4685]: I0128 12:22:49.545487 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:22:49 crc kubenswrapper[4685]: E0128 12:22:49.546322 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 12:22:49 crc kubenswrapper[4685]: I0128 12:22:49.566296 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Jan 28 12:22:50 crc kubenswrapper[4685]: I0128 12:22:50.545655 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:22:50 crc kubenswrapper[4685]: E0128 12:22:50.547756 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5x4kp" podUID="5f0d7b7e-1577-4289-9043-ddf8dd9a48ef" Jan 28 12:22:50 crc kubenswrapper[4685]: E0128 12:22:50.567612 4685 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Jan 28 12:22:51 crc kubenswrapper[4685]: I0128 12:22:51.545024 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 12:22:51 crc kubenswrapper[4685]: I0128 12:22:51.545032 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 12:22:51 crc kubenswrapper[4685]: I0128 12:22:51.545064 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:22:51 crc kubenswrapper[4685]: I0128 12:22:51.548346 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 28 12:22:51 crc kubenswrapper[4685]: I0128 12:22:51.548879 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 28 12:22:51 crc kubenswrapper[4685]: I0128 12:22:51.548889 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 28 12:22:51 crc kubenswrapper[4685]: I0128 12:22:51.549054 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 28 12:22:51 crc kubenswrapper[4685]: I0128 12:22:51.963472 4685 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.019312 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=3.01928338 podStartE2EDuration="3.01928338s" podCreationTimestamp="2026-01-28 12:22:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:22:50.592814147 +0000 UTC m=+121.680227992" watchObservedRunningTime="2026-01-28 12:22:52.01928338 +0000 UTC m=+123.106697245" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.020120 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7pxnw"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.020739 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.024048 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.025553 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.025938 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.029945 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.030034 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.030144 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.037199 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-dd6rs"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.038015 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-dd6rs" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.038794 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.039674 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.041846 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-54gh9"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.042695 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-xcd5g"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.042915 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-54gh9" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.043639 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-xcd5g" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.047121 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.048282 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.049085 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xh8vq"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.049674 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xh8vq" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.051934 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.052539 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.052844 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.053811 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.054326 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.054488 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.054745 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.054994 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.061428 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.062208 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.062222 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.063588 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.061829 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-zp7xc"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.065835 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.068289 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.065938 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.066018 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.066745 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 28 12:22:52 crc 
kubenswrapper[4685]: I0128 12:22:52.066840 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.066903 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.066986 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.067366 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.081238 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.081620 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.081776 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.081866 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.081991 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.082119 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.082941 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-z2lzl"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.083245 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.083423 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.083616 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.083719 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.084137 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.083723 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.084337 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.083759 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.084500 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.084571 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.085299 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.088213 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-6rxh9"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.088351 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.088483 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.088598 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.088776 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mwwhj"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.089078 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qfcjm"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.089582 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qfcjm" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.089642 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-6rxh9" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.089768 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mwwhj" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.090239 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-68qg7"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.090693 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.090981 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.093288 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.094884 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.096043 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.099781 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-psh57"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.100808 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-lr8cn"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.101674 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-lr8cn" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.102317 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-psh57" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.103445 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4x7zt"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.103851 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4x7zt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.105290 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-dxl25"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.107395 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.107599 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.107631 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.107807 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.107936 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.108081 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.108136 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.108081 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.108289 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.108490 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.108493 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.108565 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.108583 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.108660 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.108717 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.108746 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.108783 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.108793 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.109971 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.110188 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.110318 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.110409 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 28 12:22:52 crc 
kubenswrapper[4685]: I0128 12:22:52.110506 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.110624 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.110710 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.110726 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.110780 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.110856 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.110909 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.110860 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.111115 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.111132 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.110994 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.111237 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.111029 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.111046 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.111070 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.111739 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.111832 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.112334 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.112593 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.112953 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 28 12:22:52 
crc kubenswrapper[4685]: I0128 12:22:52.113013 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.114365 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hbxst"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.115039 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hbxst" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.115658 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.116296 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.119483 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.119899 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.120364 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.120369 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.126904 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.127562 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.130744 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.131220 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.136544 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.136549 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.144712 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6z8gs"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.182949 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.183704 4685 reconciler_common.go:245] 
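Annotation: from here the reconciler enumerates every volume of the just-added pods via VerifyControllerAttachedVolume before any MountVolume is attempted; the volume names map one-to-one onto each pod spec's volumes stanza. A reconstruction of what that stanza plausibly looks like for etcd-operator-b45778765-68qg7, with object names inferred from the cache lines above rather than read from the actual manifest:

// volumes_sketch.go - a reconstruction (from the log entries around it)
// of the volume stanza kubelet is verifying for etcd-operator-b45778765-68qg7.
// Shapes and object names are inferred, not taken from the real manifest.
package main

import (
	corev1 "k8s.io/api/core/v1"
)

func etcdOperatorVolumes() []corev1.Volume {
	cm := func(name, ref string) corev1.Volume {
		return corev1.Volume{Name: name, VolumeSource: corev1.VolumeSource{
			ConfigMap: &corev1.ConfigMapVolumeSource{
				LocalObjectReference: corev1.LocalObjectReference{Name: ref}}}}
	}
	sec := func(name, ref string) corev1.Volume {
		return corev1.Volume{Name: name, VolumeSource: corev1.VolumeSource{
			Secret: &corev1.SecretVolumeSource{SecretName: ref}}}
	}
	return []corev1.Volume{
		cm("config", "etcd-operator-config"),
		cm("etcd-ca", "etcd-ca-bundle"),
		cm("etcd-service-ca", "etcd-service-ca-bundle"),
		sec("etcd-client", "etcd-client"),
		sec("serving-cert", "etcd-operator-serving-cert"),
		// "kube-api-access-xm697" is the generated projected token volume,
		// added by the API server rather than declared in the manifest.
	}
}

func main() { _ = etcdOperatorVolumes() }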
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdz4n\" (UniqueName: \"kubernetes.io/projected/0353bfa2-54e9-4f75-ab25-ed65d14a9ab7-kube-api-access-jdz4n\") pod \"cluster-samples-operator-665b6dd947-qfcjm\" (UID: \"0353bfa2-54e9-4f75-ab25-ed65d14a9ab7\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qfcjm" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.183735 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/b22a7729-d10a-412c-8df1-30992ba607b0-encryption-config\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.183752 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkc4z\" (UniqueName: \"kubernetes.io/projected/91aac1a9-a4dd-4668-af62-e76501c860ac-kube-api-access-zkc4z\") pod \"openshift-controller-manager-operator-756b6f6bc6-psh57\" (UID: \"91aac1a9-a4dd-4668-af62-e76501c860ac\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-psh57" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.183765 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-xvw2d"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.183777 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b22a7729-d10a-412c-8df1-30992ba607b0-trusted-ca-bundle\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.183793 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/b22a7729-d10a-412c-8df1-30992ba607b0-etcd-serving-ca\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.183810 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/b22a7729-d10a-412c-8df1-30992ba607b0-audit\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.183828 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/9df99517-c3a0-4fde-a93f-24b84a33615a-etcd-service-ca\") pod \"etcd-operator-b45778765-68qg7\" (UID: \"9df99517-c3a0-4fde-a93f-24b84a33615a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.183847 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b22a7729-d10a-412c-8df1-30992ba607b0-config\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc 
kubenswrapper[4685]: I0128 12:22:52.183861 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91fd9e56-9836-4427-b58e-9c0742895c7a-config\") pod \"machine-approver-56656f9798-4h4rx\" (UID: \"91fd9e56-9836-4427-b58e-9c0742895c7a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.183877 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91aac1a9-a4dd-4668-af62-e76501c860ac-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-psh57\" (UID: \"91aac1a9-a4dd-4668-af62-e76501c860ac\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-psh57" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.183891 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/0353bfa2-54e9-4f75-ab25-ed65d14a9ab7-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-qfcjm\" (UID: \"0353bfa2-54e9-4f75-ab25-ed65d14a9ab7\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qfcjm" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.183906 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/b22a7729-d10a-412c-8df1-30992ba607b0-image-import-ca\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.183919 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b22a7729-d10a-412c-8df1-30992ba607b0-audit-dir\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.183933 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91aac1a9-a4dd-4668-af62-e76501c860ac-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-psh57\" (UID: \"91aac1a9-a4dd-4668-af62-e76501c860ac\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-psh57" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.183949 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d97a0072-f1ae-4674-8ad6-aab9c60155f3-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-4x7zt\" (UID: \"d97a0072-f1ae-4674-8ad6-aab9c60155f3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4x7zt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.183963 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gl27d\" (UniqueName: \"kubernetes.io/projected/91fd9e56-9836-4427-b58e-9c0742895c7a-kube-api-access-gl27d\") pod \"machine-approver-56656f9798-4h4rx\" (UID: \"91fd9e56-9836-4427-b58e-9c0742895c7a\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.183979 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/31a31a5b-65f9-4997-b083-16382c0f6c11-metrics-tls\") pod \"dns-operator-744455d44c-6rxh9\" (UID: \"31a31a5b-65f9-4997-b083-16382c0f6c11\") " pod="openshift-dns-operator/dns-operator-744455d44c-6rxh9" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.183992 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9df99517-c3a0-4fde-a93f-24b84a33615a-serving-cert\") pod \"etcd-operator-b45778765-68qg7\" (UID: \"9df99517-c3a0-4fde-a93f-24b84a33615a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.184023 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6f7d6\" (UniqueName: \"kubernetes.io/projected/b22a7729-d10a-412c-8df1-30992ba607b0-kube-api-access-6f7d6\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.184038 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/91fd9e56-9836-4427-b58e-9c0742895c7a-machine-approver-tls\") pod \"machine-approver-56656f9798-4h4rx\" (UID: \"91fd9e56-9836-4427-b58e-9c0742895c7a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.184060 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b22a7729-d10a-412c-8df1-30992ba607b0-serving-cert\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.184073 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/9df99517-c3a0-4fde-a93f-24b84a33615a-etcd-ca\") pod \"etcd-operator-b45778765-68qg7\" (UID: \"9df99517-c3a0-4fde-a93f-24b84a33615a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.184091 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/b22a7729-d10a-412c-8df1-30992ba607b0-node-pullsecrets\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.184115 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qcnds\" (UniqueName: \"kubernetes.io/projected/31a31a5b-65f9-4997-b083-16382c0f6c11-kube-api-access-qcnds\") pod \"dns-operator-744455d44c-6rxh9\" (UID: \"31a31a5b-65f9-4997-b083-16382c0f6c11\") " pod="openshift-dns-operator/dns-operator-744455d44c-6rxh9" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.184129 4685 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9df99517-c3a0-4fde-a93f-24b84a33615a-config\") pod \"etcd-operator-b45778765-68qg7\" (UID: \"9df99517-c3a0-4fde-a93f-24b84a33615a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.184146 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xm697\" (UniqueName: \"kubernetes.io/projected/9df99517-c3a0-4fde-a93f-24b84a33615a-kube-api-access-xm697\") pod \"etcd-operator-b45778765-68qg7\" (UID: \"9df99517-c3a0-4fde-a93f-24b84a33615a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.184163 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d97a0072-f1ae-4674-8ad6-aab9c60155f3-config\") pod \"kube-controller-manager-operator-78b949d7b-4x7zt\" (UID: \"d97a0072-f1ae-4674-8ad6-aab9c60155f3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4x7zt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.184213 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9df99517-c3a0-4fde-a93f-24b84a33615a-etcd-client\") pod \"etcd-operator-b45778765-68qg7\" (UID: \"9df99517-c3a0-4fde-a93f-24b84a33615a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.184227 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/91fd9e56-9836-4427-b58e-9c0742895c7a-auth-proxy-config\") pod \"machine-approver-56656f9798-4h4rx\" (UID: \"91fd9e56-9836-4427-b58e-9c0742895c7a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.184243 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d97a0072-f1ae-4674-8ad6-aab9c60155f3-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-4x7zt\" (UID: \"d97a0072-f1ae-4674-8ad6-aab9c60155f3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4x7zt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.184261 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b22a7729-d10a-412c-8df1-30992ba607b0-etcd-client\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.184331 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6z8gs" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.184130 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-xvw2d" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.184601 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.187049 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.187229 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-kfqrq"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.187920 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-kfqrq" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.189749 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.190438 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.192971 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.193335 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.193788 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.196372 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.198958 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jbtd9"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.200194 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jbtd9" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.200453 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.200529 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qpc29"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.200985 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.205141 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.205310 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-lw2hz"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.206200 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-lw2hz" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.206842 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-922md"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.208033 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.208265 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-922md" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.212001 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-s4474"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.212590 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.212977 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-pvrr6"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.213374 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.213512 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.213379 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-s4474" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.216075 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.216305 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-pvrr6" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.216886 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-7bmls"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.217183 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.217966 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.218399 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vcrmr"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.218511 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-7bmls" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.218660 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.219152 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4gdsr"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.219326 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vcrmr" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.220232 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-54gh9"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.220324 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.221151 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xh8vq"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.222230 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-ftmbf"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.222912 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-ftmbf" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.223417 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.223620 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mwwhj"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.225252 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.226577 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qfcjm"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.231774 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-z2lzl"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.239987 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-6rxh9"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.241856 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-dd6rs"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.243631 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.257270 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-lr8cn"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.259194 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.260620 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq"] Jan 28 12:22:52 crc 
kubenswrapper[4685]: I0128 12:22:52.261881 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jbtd9"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.263584 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-kfqrq"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.265188 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.267116 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4x7zt"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.268400 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-dxl25"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.269662 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-zp7xc"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.271003 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-xvxqg"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.272025 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-slprg"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.272242 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.272937 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-slprg" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.273253 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-pvrr6"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.274424 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-68qg7"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.275613 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qpc29"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.276421 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-s4474"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.277413 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-lw2hz"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.278543 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-psh57"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.279567 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.280718 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6z8gs"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.281927 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hbxst"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.283125 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7pxnw"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.284259 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.284884 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b22a7729-d10a-412c-8df1-30992ba607b0-config\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.284913 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91fd9e56-9836-4427-b58e-9c0742895c7a-config\") pod \"machine-approver-56656f9798-4h4rx\" (UID: \"91fd9e56-9836-4427-b58e-9c0742895c7a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.284932 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91aac1a9-a4dd-4668-af62-e76501c860ac-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-psh57\" (UID: \"91aac1a9-a4dd-4668-af62-e76501c860ac\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-psh57" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.284950 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/0353bfa2-54e9-4f75-ab25-ed65d14a9ab7-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-qfcjm\" (UID: \"0353bfa2-54e9-4f75-ab25-ed65d14a9ab7\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qfcjm" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.284967 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/b22a7729-d10a-412c-8df1-30992ba607b0-image-import-ca\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.284983 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91aac1a9-a4dd-4668-af62-e76501c860ac-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-psh57\" (UID: \"91aac1a9-a4dd-4668-af62-e76501c860ac\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-psh57" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.284999 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d97a0072-f1ae-4674-8ad6-aab9c60155f3-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-4x7zt\" (UID: \"d97a0072-f1ae-4674-8ad6-aab9c60155f3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4x7zt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 
12:22:52.285014 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gl27d\" (UniqueName: \"kubernetes.io/projected/91fd9e56-9836-4427-b58e-9c0742895c7a-kube-api-access-gl27d\") pod \"machine-approver-56656f9798-4h4rx\" (UID: \"91fd9e56-9836-4427-b58e-9c0742895c7a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285031 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b22a7729-d10a-412c-8df1-30992ba607b0-audit-dir\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285047 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/31a31a5b-65f9-4997-b083-16382c0f6c11-metrics-tls\") pod \"dns-operator-744455d44c-6rxh9\" (UID: \"31a31a5b-65f9-4997-b083-16382c0f6c11\") " pod="openshift-dns-operator/dns-operator-744455d44c-6rxh9" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285063 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9df99517-c3a0-4fde-a93f-24b84a33615a-serving-cert\") pod \"etcd-operator-b45778765-68qg7\" (UID: \"9df99517-c3a0-4fde-a93f-24b84a33615a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285094 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6f7d6\" (UniqueName: \"kubernetes.io/projected/b22a7729-d10a-412c-8df1-30992ba607b0-kube-api-access-6f7d6\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285110 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/91fd9e56-9836-4427-b58e-9c0742895c7a-machine-approver-tls\") pod \"machine-approver-56656f9798-4h4rx\" (UID: \"91fd9e56-9836-4427-b58e-9c0742895c7a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285139 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b22a7729-d10a-412c-8df1-30992ba607b0-serving-cert\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285155 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/9df99517-c3a0-4fde-a93f-24b84a33615a-etcd-ca\") pod \"etcd-operator-b45778765-68qg7\" (UID: \"9df99517-c3a0-4fde-a93f-24b84a33615a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285187 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/b22a7729-d10a-412c-8df1-30992ba607b0-node-pullsecrets\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " 
pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285211 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qcnds\" (UniqueName: \"kubernetes.io/projected/31a31a5b-65f9-4997-b083-16382c0f6c11-kube-api-access-qcnds\") pod \"dns-operator-744455d44c-6rxh9\" (UID: \"31a31a5b-65f9-4997-b083-16382c0f6c11\") " pod="openshift-dns-operator/dns-operator-744455d44c-6rxh9" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285229 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9df99517-c3a0-4fde-a93f-24b84a33615a-config\") pod \"etcd-operator-b45778765-68qg7\" (UID: \"9df99517-c3a0-4fde-a93f-24b84a33615a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285246 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xm697\" (UniqueName: \"kubernetes.io/projected/9df99517-c3a0-4fde-a93f-24b84a33615a-kube-api-access-xm697\") pod \"etcd-operator-b45778765-68qg7\" (UID: \"9df99517-c3a0-4fde-a93f-24b84a33615a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285265 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d97a0072-f1ae-4674-8ad6-aab9c60155f3-config\") pod \"kube-controller-manager-operator-78b949d7b-4x7zt\" (UID: \"d97a0072-f1ae-4674-8ad6-aab9c60155f3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4x7zt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285290 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9df99517-c3a0-4fde-a93f-24b84a33615a-etcd-client\") pod \"etcd-operator-b45778765-68qg7\" (UID: \"9df99517-c3a0-4fde-a93f-24b84a33615a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285305 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/91fd9e56-9836-4427-b58e-9c0742895c7a-auth-proxy-config\") pod \"machine-approver-56656f9798-4h4rx\" (UID: \"91fd9e56-9836-4427-b58e-9c0742895c7a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285324 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b22a7729-d10a-412c-8df1-30992ba607b0-etcd-client\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285339 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d97a0072-f1ae-4674-8ad6-aab9c60155f3-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-4x7zt\" (UID: \"d97a0072-f1ae-4674-8ad6-aab9c60155f3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4x7zt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285356 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-jdz4n\" (UniqueName: \"kubernetes.io/projected/0353bfa2-54e9-4f75-ab25-ed65d14a9ab7-kube-api-access-jdz4n\") pod \"cluster-samples-operator-665b6dd947-qfcjm\" (UID: \"0353bfa2-54e9-4f75-ab25-ed65d14a9ab7\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qfcjm" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285382 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b22a7729-d10a-412c-8df1-30992ba607b0-trusted-ca-bundle\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285398 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/b22a7729-d10a-412c-8df1-30992ba607b0-encryption-config\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285413 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkc4z\" (UniqueName: \"kubernetes.io/projected/91aac1a9-a4dd-4668-af62-e76501c860ac-kube-api-access-zkc4z\") pod \"openshift-controller-manager-operator-756b6f6bc6-psh57\" (UID: \"91aac1a9-a4dd-4668-af62-e76501c860ac\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-psh57" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285433 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/b22a7729-d10a-412c-8df1-30992ba607b0-etcd-serving-ca\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285451 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/b22a7729-d10a-412c-8df1-30992ba607b0-audit\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.285473 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/9df99517-c3a0-4fde-a93f-24b84a33615a-etcd-service-ca\") pod \"etcd-operator-b45778765-68qg7\" (UID: \"9df99517-c3a0-4fde-a93f-24b84a33615a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.286224 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b22a7729-d10a-412c-8df1-30992ba607b0-config\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.286236 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/9df99517-c3a0-4fde-a93f-24b84a33615a-etcd-service-ca\") pod \"etcd-operator-b45778765-68qg7\" (UID: \"9df99517-c3a0-4fde-a93f-24b84a33615a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" Jan 28 
12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.286262 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b22a7729-d10a-412c-8df1-30992ba607b0-audit-dir\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.286289 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.286447 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-xcd5g"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.286742 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91fd9e56-9836-4427-b58e-9c0742895c7a-config\") pod \"machine-approver-56656f9798-4h4rx\" (UID: \"91fd9e56-9836-4427-b58e-9c0742895c7a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.287000 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/b22a7729-d10a-412c-8df1-30992ba607b0-image-import-ca\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.287694 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91aac1a9-a4dd-4668-af62-e76501c860ac-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-psh57\" (UID: \"91aac1a9-a4dd-4668-af62-e76501c860ac\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-psh57" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.288270 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/91fd9e56-9836-4427-b58e-9c0742895c7a-auth-proxy-config\") pod \"machine-approver-56656f9798-4h4rx\" (UID: \"91fd9e56-9836-4427-b58e-9c0742895c7a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.288814 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b22a7729-d10a-412c-8df1-30992ba607b0-trusted-ca-bundle\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.288882 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-922md"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.289412 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-xvxqg"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.289455 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d97a0072-f1ae-4674-8ad6-aab9c60155f3-config\") pod \"kube-controller-manager-operator-78b949d7b-4x7zt\" (UID: \"d97a0072-f1ae-4674-8ad6-aab9c60155f3\") " 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4x7zt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.289461 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/9df99517-c3a0-4fde-a93f-24b84a33615a-etcd-ca\") pod \"etcd-operator-b45778765-68qg7\" (UID: \"9df99517-c3a0-4fde-a93f-24b84a33615a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.289529 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/b22a7729-d10a-412c-8df1-30992ba607b0-node-pullsecrets\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.290758 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/b22a7729-d10a-412c-8df1-30992ba607b0-audit\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.290825 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/b22a7729-d10a-412c-8df1-30992ba607b0-etcd-serving-ca\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.290989 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d97a0072-f1ae-4674-8ad6-aab9c60155f3-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-4x7zt\" (UID: \"d97a0072-f1ae-4674-8ad6-aab9c60155f3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4x7zt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.291223 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b22a7729-d10a-412c-8df1-30992ba607b0-etcd-client\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.291438 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/31a31a5b-65f9-4997-b083-16382c0f6c11-metrics-tls\") pod \"dns-operator-744455d44c-6rxh9\" (UID: \"31a31a5b-65f9-4997-b083-16382c0f6c11\") " pod="openshift-dns-operator/dns-operator-744455d44c-6rxh9" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.291901 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vcrmr"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.292018 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/0353bfa2-54e9-4f75-ab25-ed65d14a9ab7-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-qfcjm\" (UID: \"0353bfa2-54e9-4f75-ab25-ed65d14a9ab7\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qfcjm" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.292077 4685 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9df99517-c3a0-4fde-a93f-24b84a33615a-config\") pod \"etcd-operator-b45778765-68qg7\" (UID: \"9df99517-c3a0-4fde-a93f-24b84a33615a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.293209 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91aac1a9-a4dd-4668-af62-e76501c860ac-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-psh57\" (UID: \"91aac1a9-a4dd-4668-af62-e76501c860ac\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-psh57" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.293480 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.294636 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b22a7729-d10a-412c-8df1-30992ba607b0-serving-cert\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.294696 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9df99517-c3a0-4fde-a93f-24b84a33615a-etcd-client\") pod \"etcd-operator-b45778765-68qg7\" (UID: \"9df99517-c3a0-4fde-a93f-24b84a33615a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.294825 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/91fd9e56-9836-4427-b58e-9c0742895c7a-machine-approver-tls\") pod \"machine-approver-56656f9798-4h4rx\" (UID: \"91fd9e56-9836-4427-b58e-9c0742895c7a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.295363 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9df99517-c3a0-4fde-a93f-24b84a33615a-serving-cert\") pod \"etcd-operator-b45778765-68qg7\" (UID: \"9df99517-c3a0-4fde-a93f-24b84a33615a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.297321 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4gdsr"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.298640 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-7bmls"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.300310 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.301983 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-slprg"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.303408 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-gjp8v"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.303778 4685 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.304236 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-gjp8v" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.306342 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-gjp8v"] Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.308975 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/b22a7729-d10a-412c-8df1-30992ba607b0-encryption-config\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.323265 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.342604 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.363136 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.383094 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.402624 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.423216 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.443026 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.470718 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.484033 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.504757 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.523674 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.543033 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.545078 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.563047 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.587589 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.604912 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.623611 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.643601 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.662633 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.684539 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.704857 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.725114 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.744766 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.764112 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.784019 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.807307 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.824812 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.844486 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.863033 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.883261 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.914389 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.924050 
4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.943449 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.963652 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 28 12:22:52 crc kubenswrapper[4685]: I0128 12:22:52.984348 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.003728 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.049503 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.050249 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.063079 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.082135 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.103962 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.123153 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.144022 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.163107 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.183809 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.203367 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.221158 4685 request.go:700] Waited for 1.004552408s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-service-ca-operator/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0 Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.222877 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.243565 4685 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.263912 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.283134 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.303123 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.323544 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.343698 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.363716 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.383716 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.403594 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.424529 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.443920 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.465139 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.483759 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.503976 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.524512 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.544794 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.564137 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.583453 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.603298 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.622998 4685 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.644153 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.673396 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.683984 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.703273 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.727159 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.743778 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.778840 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.791916 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.811095 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.823815 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.843746 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.863468 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.884306 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.924737 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.943992 4685 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.963473 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 28 12:22:53 crc kubenswrapper[4685]: I0128 12:22:53.982687 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.005024 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 28 12:22:54 crc kubenswrapper[4685]: 
I0128 12:22:54.013867 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7ad2819-235d-4f6d-b731-6f8e31db0b13-config\") pod \"machine-api-operator-5694c8668f-xcd5g\" (UID: \"a7ad2819-235d-4f6d-b731-6f8e31db0b13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xcd5g" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.014559 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-encryption-config\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.014897 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/46b82026-d586-40b2-ad5b-fc08674d7067-registry-certificates\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.015295 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/46b82026-d586-40b2-ad5b-fc08674d7067-bound-sa-token\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.015533 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8p68r\" (UniqueName: \"kubernetes.io/projected/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-kube-api-access-8p68r\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.015735 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a7ad2819-235d-4f6d-b731-6f8e31db0b13-images\") pod \"machine-api-operator-5694c8668f-xcd5g\" (UID: \"a7ad2819-235d-4f6d-b731-6f8e31db0b13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xcd5g" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.015897 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/edb4a604-6b03-48d4-b5ab-09e266b5eef8-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-7pxnw\" (UID: \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.015969 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-audit-policies\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.016018 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-qrp77\" (UniqueName: \"kubernetes.io/projected/79a36fbf-eb6c-442f-b6f0-d4a5f7435dde-kube-api-access-qrp77\") pod \"downloads-7954f5f757-54gh9\" (UID: \"79a36fbf-eb6c-442f-b6f0-d4a5f7435dde\") " pod="openshift-console/downloads-7954f5f757-54gh9" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.016122 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.016156 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/edb4a604-6b03-48d4-b5ab-09e266b5eef8-client-ca\") pod \"controller-manager-879f6c89f-7pxnw\" (UID: \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.016302 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hd752\" (UniqueName: \"kubernetes.io/projected/edb4a604-6b03-48d4-b5ab-09e266b5eef8-kube-api-access-hd752\") pod \"controller-manager-879f6c89f-7pxnw\" (UID: \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.016360 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tq58r\" (UniqueName: \"kubernetes.io/projected/a7ad2819-235d-4f6d-b731-6f8e31db0b13-kube-api-access-tq58r\") pod \"machine-api-operator-5694c8668f-xcd5g\" (UID: \"a7ad2819-235d-4f6d-b731-6f8e31db0b13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xcd5g" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.016403 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.016432 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/46b82026-d586-40b2-ad5b-fc08674d7067-ca-trust-extracted\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.016456 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/08663495-9331-4b6f-b82a-67b308a9afa3-service-ca\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: E0128 12:22:54.017095 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: 
nodeName:}" failed. No retries permitted until 2026-01-28 12:22:54.517078843 +0000 UTC m=+125.604492808 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.017213 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/df73faab-d681-4ffe-b550-331fa336bc10-service-ca-bundle\") pod \"authentication-operator-69f744f599-dd6rs\" (UID: \"df73faab-d681-4ffe-b550-331fa336bc10\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dd6rs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.017290 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2143d413-3556-4232-a3d6-145f94d98606-serving-cert\") pod \"console-operator-58897d9998-lr8cn\" (UID: \"2143d413-3556-4232-a3d6-145f94d98606\") " pod="openshift-console-operator/console-operator-58897d9998-lr8cn" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.017327 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2143d413-3556-4232-a3d6-145f94d98606-trusted-ca\") pod \"console-operator-58897d9998-lr8cn\" (UID: \"2143d413-3556-4232-a3d6-145f94d98606\") " pod="openshift-console-operator/console-operator-58897d9998-lr8cn" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.017642 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/df73faab-d681-4ffe-b550-331fa336bc10-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-dd6rs\" (UID: \"df73faab-d681-4ffe-b550-331fa336bc10\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dd6rs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.017844 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/df73faab-d681-4ffe-b550-331fa336bc10-serving-cert\") pod \"authentication-operator-69f744f599-dd6rs\" (UID: \"df73faab-d681-4ffe-b550-331fa336bc10\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dd6rs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.018031 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edb4a604-6b03-48d4-b5ab-09e266b5eef8-config\") pod \"controller-manager-879f6c89f-7pxnw\" (UID: \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.018270 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/46b82026-d586-40b2-ad5b-fc08674d7067-installation-pull-secrets\") pod 
\"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.018455 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-audit-dir\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.018625 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ea3bd48a-bb7b-4642-a8e5-5ad2e71d31f9-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-xh8vq\" (UID: \"ea3bd48a-bb7b-4642-a8e5-5ad2e71d31f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xh8vq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.018801 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/46b82026-d586-40b2-ad5b-fc08674d7067-trusted-ca\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.018966 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55j98\" (UniqueName: \"kubernetes.io/projected/ea3bd48a-bb7b-4642-a8e5-5ad2e71d31f9-kube-api-access-55j98\") pod \"openshift-apiserver-operator-796bbdcf4f-xh8vq\" (UID: \"ea3bd48a-bb7b-4642-a8e5-5ad2e71d31f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xh8vq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.019150 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/08663495-9331-4b6f-b82a-67b308a9afa3-console-oauth-config\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.019367 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4m6dk\" (UniqueName: \"kubernetes.io/projected/2143d413-3556-4232-a3d6-145f94d98606-kube-api-access-4m6dk\") pod \"console-operator-58897d9998-lr8cn\" (UID: \"2143d413-3556-4232-a3d6-145f94d98606\") " pod="openshift-console-operator/console-operator-58897d9998-lr8cn" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.019587 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snn7d\" (UniqueName: \"kubernetes.io/projected/45dd6f4b-7432-459c-ba00-b849e384eaae-kube-api-access-snn7d\") pod \"cluster-image-registry-operator-dc59b4c8b-mwwhj\" (UID: \"45dd6f4b-7432-459c-ba00-b849e384eaae\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mwwhj" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.019816 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/75a32638-e479-4025-abd5-d00533347443-available-featuregates\") pod 
\"openshift-config-operator-7777fb866f-dwzsq\" (UID: \"75a32638-e479-4025-abd5-d00533347443\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.020030 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/08663495-9331-4b6f-b82a-67b308a9afa3-console-config\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.020237 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/46b82026-d586-40b2-ad5b-fc08674d7067-registry-tls\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.020806 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/08663495-9331-4b6f-b82a-67b308a9afa3-console-serving-cert\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.020977 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7h8g\" (UniqueName: \"kubernetes.io/projected/df73faab-d681-4ffe-b550-331fa336bc10-kube-api-access-l7h8g\") pod \"authentication-operator-69f744f599-dd6rs\" (UID: \"df73faab-d681-4ffe-b550-331fa336bc10\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dd6rs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.021132 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/08663495-9331-4b6f-b82a-67b308a9afa3-oauth-serving-cert\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.021391 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df73faab-d681-4ffe-b550-331fa336bc10-config\") pod \"authentication-operator-69f744f599-dd6rs\" (UID: \"df73faab-d681-4ffe-b550-331fa336bc10\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dd6rs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.021552 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/edb4a604-6b03-48d4-b5ab-09e266b5eef8-serving-cert\") pod \"controller-manager-879f6c89f-7pxnw\" (UID: \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.021695 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-etcd-client\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.021850 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-config\") pod \"route-controller-manager-6576b87f9c-2966r\" (UID: \"6d7a1a22-14a6-419e-b4f1-ebf636f248f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.022031 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlkkd\" (UniqueName: \"kubernetes.io/projected/08663495-9331-4b6f-b82a-67b308a9afa3-kube-api-access-dlkkd\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.022246 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/45dd6f4b-7432-459c-ba00-b849e384eaae-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-mwwhj\" (UID: \"45dd6f4b-7432-459c-ba00-b849e384eaae\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mwwhj" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.022406 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/45dd6f4b-7432-459c-ba00-b849e384eaae-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-mwwhj\" (UID: \"45dd6f4b-7432-459c-ba00-b849e384eaae\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mwwhj" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.022574 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/08663495-9331-4b6f-b82a-67b308a9afa3-trusted-ca-bundle\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.022740 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-serving-cert\") pod \"route-controller-manager-6576b87f9c-2966r\" (UID: \"6d7a1a22-14a6-419e-b4f1-ebf636f248f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.022904 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8p76\" (UniqueName: \"kubernetes.io/projected/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-kube-api-access-v8p76\") pod \"route-controller-manager-6576b87f9c-2966r\" (UID: \"6d7a1a22-14a6-419e-b4f1-ebf636f248f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.023079 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.023273 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/75a32638-e479-4025-abd5-d00533347443-serving-cert\") pod \"openshift-config-operator-7777fb866f-dwzsq\" (UID: \"75a32638-e479-4025-abd5-d00533347443\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.023472 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-serving-cert\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.023629 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/a7ad2819-235d-4f6d-b731-6f8e31db0b13-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-xcd5g\" (UID: \"a7ad2819-235d-4f6d-b731-6f8e31db0b13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xcd5g" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.023869 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxsgk\" (UniqueName: \"kubernetes.io/projected/46b82026-d586-40b2-ad5b-fc08674d7067-kube-api-access-wxsgk\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.024068 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kcrl6\" (UniqueName: \"kubernetes.io/projected/75a32638-e479-4025-abd5-d00533347443-kube-api-access-kcrl6\") pod \"openshift-config-operator-7777fb866f-dwzsq\" (UID: \"75a32638-e479-4025-abd5-d00533347443\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.024277 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2143d413-3556-4232-a3d6-145f94d98606-config\") pod \"console-operator-58897d9998-lr8cn\" (UID: \"2143d413-3556-4232-a3d6-145f94d98606\") " pod="openshift-console-operator/console-operator-58897d9998-lr8cn" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.024499 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-client-ca\") pod \"route-controller-manager-6576b87f9c-2966r\" (UID: \"6d7a1a22-14a6-419e-b4f1-ebf636f248f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.024661 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/45dd6f4b-7432-459c-ba00-b849e384eaae-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-mwwhj\" (UID: \"45dd6f4b-7432-459c-ba00-b849e384eaae\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mwwhj" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.024923 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.024976 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea3bd48a-bb7b-4642-a8e5-5ad2e71d31f9-config\") pod \"openshift-apiserver-operator-796bbdcf4f-xh8vq\" (UID: \"ea3bd48a-bb7b-4642-a8e5-5ad2e71d31f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xh8vq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.077366 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gl27d\" (UniqueName: \"kubernetes.io/projected/91fd9e56-9836-4427-b58e-9c0742895c7a-kube-api-access-gl27d\") pod \"machine-approver-56656f9798-4h4rx\" (UID: \"91fd9e56-9836-4427-b58e-9c0742895c7a\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.105359 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdz4n\" (UniqueName: \"kubernetes.io/projected/0353bfa2-54e9-4f75-ab25-ed65d14a9ab7-kube-api-access-jdz4n\") pod \"cluster-samples-operator-665b6dd947-qfcjm\" (UID: \"0353bfa2-54e9-4f75-ab25-ed65d14a9ab7\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qfcjm" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.113391 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkc4z\" (UniqueName: \"kubernetes.io/projected/91aac1a9-a4dd-4668-af62-e76501c860ac-kube-api-access-zkc4z\") pod \"openshift-controller-manager-operator-756b6f6bc6-psh57\" (UID: \"91aac1a9-a4dd-4668-af62-e76501c860ac\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-psh57" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.125904 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.126095 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b3a33bb2-4fb3-4988-8b43-62a9bdd2e659-node-bootstrap-token\") pod \"machine-config-server-ftmbf\" (UID: \"b3a33bb2-4fb3-4988-8b43-62a9bdd2e659\") " pod="openshift-machine-config-operator/machine-config-server-ftmbf" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.126148 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.126210 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: 
\"kubernetes.io/secret/3fdafa55-db38-4041-bb34-f319905ad733-srv-cert\") pod \"catalog-operator-68c6474976-6wksk\" (UID: \"3fdafa55-db38-4041-bb34-f319905ad733\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.126244 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39f4fa09-765f-45c8-8206-290bf19fab29-config\") pod \"kube-apiserver-operator-766d6c64bb-vcrmr\" (UID: \"39f4fa09-765f-45c8-8206-290bf19fab29\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vcrmr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.126287 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.126321 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-client-ca\") pod \"route-controller-manager-6576b87f9c-2966r\" (UID: \"6d7a1a22-14a6-419e-b4f1-ebf636f248f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.126359 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.126423 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/54c1da50-0209-4285-bc15-91427e15241d-socket-dir\") pod \"csi-hostpathplugin-xvxqg\" (UID: \"54c1da50-0209-4285-bc15-91427e15241d\") " pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" Jan 28 12:22:54 crc kubenswrapper[4685]: E0128 12:22:54.126564 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:54.626539436 +0000 UTC m=+125.713953301 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.126859 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4cd9e183-ef71-4bc0-af54-df333c728cc4-metrics-certs\") pod \"router-default-5444994796-xvw2d\" (UID: \"4cd9e183-ef71-4bc0-af54-df333c728cc4\") " pod="openshift-ingress/router-default-5444994796-xvw2d" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.126911 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftnl2\" (UniqueName: \"kubernetes.io/projected/f09051b7-ad38-4a84-89d7-ad84fd51fa1c-kube-api-access-ftnl2\") pod \"package-server-manager-789f6589d5-922md\" (UID: \"f09051b7-ad38-4a84-89d7-ad84fd51fa1c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-922md" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.126950 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.126984 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc7e3a89-0688-4bda-a76c-1b03e21b8419-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-hbxst\" (UID: \"cc7e3a89-0688-4bda-a76c-1b03e21b8419\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hbxst" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.127668 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/46b82026-d586-40b2-ad5b-fc08674d7067-bound-sa-token\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.127801 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8p68r\" (UniqueName: \"kubernetes.io/projected/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-kube-api-access-8p68r\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.127996 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvznn\" (UniqueName: \"kubernetes.io/projected/8eb33408-9530-4b30-a357-9ecd1a094606-kube-api-access-dvznn\") pod \"olm-operator-6b444d44fb-c5jnv\" (UID: \"8eb33408-9530-4b30-a357-9ecd1a094606\") " 
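[Annotation] The matching UnmountVolume.TearDown failure at the top of this span shows the same unregistered driver also blocking cleanup of the volume for deleted pod 8f668bae-...; both errors are emitted from nestedpendingoperations.go, which serializes operations per volume and stamps an earliest-retry time ("No retries permitted until ... durationBeforeRetry 500ms"). A toy sketch of that bookkeeping, under deliberately simplified semantics (no exponential growth, no per-pod nesting):

    package main

    import (
        "fmt"
        "time"
    )

    // pendingOps mimics the idea behind nestedpendingoperations.go: one
    // operation at a time per volume, with a "not before" retry stamp on failure.
    type pendingOps struct {
        notBefore map[string]time.Time // volume name -> earliest next attempt
    }

    func (p *pendingOps) tryRun(volume string, op func() error, backoff time.Duration) error {
        if t, ok := p.notBefore[volume]; ok && time.Now().Before(t) {
            return fmt.Errorf("operation for %q: no retries permitted until %s", volume, t)
        }
        if err := op(); err != nil {
            p.notBefore[volume] = time.Now().Add(backoff) // e.g. 500ms in the log
            return err
        }
        delete(p.notBefore, volume)
        return nil
    }

    func main() {
        p := &pendingOps{notBefore: map[string]time.Time{}}
        mount := func() error { return fmt.Errorf("driver not registered") }
        fmt.Println(p.tryRun("pvc-657094db", mount, 500*time.Millisecond))
        fmt.Println(p.tryRun("pvc-657094db", mount, 500*time.Millisecond)) // blocked by backoff
    }

In the real kubelet the backoff grows exponentially on repeated failures; the 500ms seen here is the first step.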
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.128113 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/edb4a604-6b03-48d4-b5ab-09e266b5eef8-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-7pxnw\" (UID: \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.128051 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.128340 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8zcgc\" (UniqueName: \"kubernetes.io/projected/b3a33bb2-4fb3-4988-8b43-62a9bdd2e659-kube-api-access-8zcgc\") pod \"machine-config-server-ftmbf\" (UID: \"b3a33bb2-4fb3-4988-8b43-62a9bdd2e659\") " pod="openshift-machine-config-operator/machine-config-server-ftmbf" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.128433 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.128579 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/54c1da50-0209-4285-bc15-91427e15241d-csi-data-dir\") pod \"csi-hostpathplugin-xvxqg\" (UID: \"54c1da50-0209-4285-bc15-91427e15241d\") " pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.128619 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-client-ca\") pod \"route-controller-manager-6576b87f9c-2966r\" (UID: \"6d7a1a22-14a6-419e-b4f1-ebf636f248f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.128801 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hl5x2\" (UniqueName: \"kubernetes.io/projected/25efaeca-af03-4a53-9955-993273376049-kube-api-access-hl5x2\") pod \"ingress-canary-gjp8v\" (UID: \"25efaeca-af03-4a53-9955-993273376049\") " pod="openshift-ingress-canary/ingress-canary-gjp8v" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.128993 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/4cd9e183-ef71-4bc0-af54-df333c728cc4-stats-auth\") pod \"router-default-5444994796-xvw2d\" (UID: \"4cd9e183-ef71-4bc0-af54-df333c728cc4\") " pod="openshift-ingress/router-default-5444994796-xvw2d" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.129089 4685 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.129135 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39f4fa09-765f-45c8-8206-290bf19fab29-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-vcrmr\" (UID: \"39f4fa09-765f-45c8-8206-290bf19fab29\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vcrmr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.129262 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7v4x8\" (UniqueName: \"kubernetes.io/projected/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-kube-api-access-7v4x8\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.129390 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.129435 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/46b82026-d586-40b2-ad5b-fc08674d7067-ca-trust-extracted\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.129473 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.129512 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/df73faab-d681-4ffe-b550-331fa336bc10-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-dd6rs\" (UID: \"df73faab-d681-4ffe-b550-331fa336bc10\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dd6rs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.129543 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2143d413-3556-4232-a3d6-145f94d98606-serving-cert\") pod \"console-operator-58897d9998-lr8cn\" (UID: \"2143d413-3556-4232-a3d6-145f94d98606\") " pod="openshift-console-operator/console-operator-58897d9998-lr8cn" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.129570 4685 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2143d413-3556-4232-a3d6-145f94d98606-trusted-ca\") pod \"console-operator-58897d9998-lr8cn\" (UID: \"2143d413-3556-4232-a3d6-145f94d98606\") " pod="openshift-console-operator/console-operator-58897d9998-lr8cn" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.129601 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/80b3c037-4d1a-4be7-81d6-1c33f1801bd6-proxy-tls\") pod \"machine-config-controller-84d6567774-kfqrq\" (UID: \"80b3c037-4d1a-4be7-81d6-1c33f1801bd6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-kfqrq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.129713 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/af98b7ab-bf20-4cb2-85ec-1eb757b8db45-auth-proxy-config\") pod \"machine-config-operator-74547568cd-xkxzp\" (UID: \"af98b7ab-bf20-4cb2-85ec-1eb757b8db45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.129818 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/0aa6b49a-8078-44f4-b1a9-2542d5bad461-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-jbtd9\" (UID: \"0aa6b49a-8078-44f4-b1a9-2542d5bad461\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jbtd9" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.129906 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8eb33408-9530-4b30-a357-9ecd1a094606-profile-collector-cert\") pod \"olm-operator-6b444d44fb-c5jnv\" (UID: \"8eb33408-9530-4b30-a357-9ecd1a094606\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130136 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20-webhook-cert\") pod \"packageserver-d55dfcdfc-h6gqk\" (UID: \"eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130205 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/46b82026-d586-40b2-ad5b-fc08674d7067-trusted-ca\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130134 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/46b82026-d586-40b2-ad5b-fc08674d7067-ca-trust-extracted\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130236 4685 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-4m6dk\" (UniqueName: \"kubernetes.io/projected/2143d413-3556-4232-a3d6-145f94d98606-kube-api-access-4m6dk\") pod \"console-operator-58897d9998-lr8cn\" (UID: \"2143d413-3556-4232-a3d6-145f94d98606\") " pod="openshift-console-operator/console-operator-58897d9998-lr8cn" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130266 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gl5k8\" (UniqueName: \"kubernetes.io/projected/80b3c037-4d1a-4be7-81d6-1c33f1801bd6-kube-api-access-gl5k8\") pod \"machine-config-controller-84d6567774-kfqrq\" (UID: \"80b3c037-4d1a-4be7-81d6-1c33f1801bd6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-kfqrq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130298 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snn7d\" (UniqueName: \"kubernetes.io/projected/45dd6f4b-7432-459c-ba00-b849e384eaae-kube-api-access-snn7d\") pod \"cluster-image-registry-operator-dc59b4c8b-mwwhj\" (UID: \"45dd6f4b-7432-459c-ba00-b849e384eaae\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mwwhj" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130335 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/75a32638-e479-4025-abd5-d00533347443-available-featuregates\") pod \"openshift-config-operator-7777fb866f-dwzsq\" (UID: \"75a32638-e479-4025-abd5-d00533347443\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130365 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cc7e3a89-0688-4bda-a76c-1b03e21b8419-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-hbxst\" (UID: \"cc7e3a89-0688-4bda-a76c-1b03e21b8419\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hbxst" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130391 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/46b82026-d586-40b2-ad5b-fc08674d7067-registry-tls\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130422 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/08663495-9331-4b6f-b82a-67b308a9afa3-oauth-serving-cert\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130429 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/edb4a604-6b03-48d4-b5ab-09e266b5eef8-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-7pxnw\" (UID: \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130449 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-qpc29\" (UID: \"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45\") " pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130480 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/421ae539-a9a0-4a76-be82-23ea497077ad-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6z8gs\" (UID: \"421ae539-a9a0-4a76-be82-23ea497077ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6z8gs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130509 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df73faab-d681-4ffe-b550-331fa336bc10-config\") pod \"authentication-operator-69f744f599-dd6rs\" (UID: \"df73faab-d681-4ffe-b550-331fa336bc10\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dd6rs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130536 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4aea2580-1e20-4df6-b328-c1b99d986330-serving-cert\") pod \"service-ca-operator-777779d784-pvrr6\" (UID: \"4aea2580-1e20-4df6-b328-c1b99d986330\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-pvrr6" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130567 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrdkn\" (UniqueName: \"kubernetes.io/projected/4aea2580-1e20-4df6-b328-c1b99d986330-kube-api-access-qrdkn\") pod \"service-ca-operator-777779d784-pvrr6\" (UID: \"4aea2580-1e20-4df6-b328-c1b99d986330\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-pvrr6" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130593 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20-tmpfs\") pod \"packageserver-d55dfcdfc-h6gqk\" (UID: \"eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130619 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-serving-cert\") pod \"route-controller-manager-6576b87f9c-2966r\" (UID: \"6d7a1a22-14a6-419e-b4f1-ebf636f248f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130644 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/45dd6f4b-7432-459c-ba00-b849e384eaae-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-mwwhj\" (UID: \"45dd6f4b-7432-459c-ba00-b849e384eaae\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mwwhj" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130668 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/54c1da50-0209-4285-bc15-91427e15241d-mountpoint-dir\") pod \"csi-hostpathplugin-xvxqg\" (UID: \"54c1da50-0209-4285-bc15-91427e15241d\") " pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130702 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkg7l\" (UniqueName: \"kubernetes.io/projected/68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45-kube-api-access-xkg7l\") pod \"marketplace-operator-79b997595-qpc29\" (UID: \"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45\") " pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130729 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/a7ad2819-235d-4f6d-b731-6f8e31db0b13-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-xcd5g\" (UID: \"a7ad2819-235d-4f6d-b731-6f8e31db0b13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xcd5g" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130756 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srmxx\" (UniqueName: \"kubernetes.io/projected/cc7e3a89-0688-4bda-a76c-1b03e21b8419-kube-api-access-srmxx\") pod \"kube-storage-version-migrator-operator-b67b599dd-hbxst\" (UID: \"cc7e3a89-0688-4bda-a76c-1b03e21b8419\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hbxst" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130814 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2143d413-3556-4232-a3d6-145f94d98606-config\") pod \"console-operator-58897d9998-lr8cn\" (UID: \"2143d413-3556-4232-a3d6-145f94d98606\") " pod="openshift-console-operator/console-operator-58897d9998-lr8cn" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130841 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20-apiservice-cert\") pod \"packageserver-d55dfcdfc-h6gqk\" (UID: \"eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130870 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xq58s\" (UniqueName: \"kubernetes.io/projected/ee856066-85d4-41a5-86eb-9ab5e01ca0c6-kube-api-access-xq58s\") pod \"dns-default-slprg\" (UID: \"ee856066-85d4-41a5-86eb-9ab5e01ca0c6\") " pod="openshift-dns/dns-default-slprg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130898 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea3bd48a-bb7b-4642-a8e5-5ad2e71d31f9-config\") pod \"openshift-apiserver-operator-796bbdcf4f-xh8vq\" (UID: \"ea3bd48a-bb7b-4642-a8e5-5ad2e71d31f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xh8vq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130925 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/45dd6f4b-7432-459c-ba00-b849e384eaae-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-mwwhj\" (UID: \"45dd6f4b-7432-459c-ba00-b849e384eaae\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mwwhj" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.130992 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7ad2819-235d-4f6d-b731-6f8e31db0b13-config\") pod \"machine-api-operator-5694c8668f-xcd5g\" (UID: \"a7ad2819-235d-4f6d-b731-6f8e31db0b13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xcd5g" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.131018 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/4cd9e183-ef71-4bc0-af54-df333c728cc4-default-certificate\") pod \"router-default-5444994796-xvw2d\" (UID: \"4cd9e183-ef71-4bc0-af54-df333c728cc4\") " pod="openshift-ingress/router-default-5444994796-xvw2d" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.131044 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/75a32638-e479-4025-abd5-d00533347443-available-featuregates\") pod \"openshift-config-operator-7777fb866f-dwzsq\" (UID: \"75a32638-e479-4025-abd5-d00533347443\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.131048 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qpc29\" (UID: \"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45\") " pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.131485 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/df73faab-d681-4ffe-b550-331fa336bc10-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-dd6rs\" (UID: \"df73faab-d681-4ffe-b550-331fa336bc10\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dd6rs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.131503 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-encryption-config\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.131605 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sr2k7\" (UniqueName: \"kubernetes.io/projected/54c1da50-0209-4285-bc15-91427e15241d-kube-api-access-sr2k7\") pod \"csi-hostpathplugin-xvxqg\" (UID: \"54c1da50-0209-4285-bc15-91427e15241d\") " pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.131684 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/46b82026-d586-40b2-ad5b-fc08674d7067-registry-certificates\") pod 
\"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.131765 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a7ad2819-235d-4f6d-b731-6f8e31db0b13-images\") pod \"machine-api-operator-5694c8668f-xcd5g\" (UID: \"a7ad2819-235d-4f6d-b731-6f8e31db0b13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xcd5g" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.131830 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/82c02f94-72c8-43fd-b0f0-7c644d696b61-bound-sa-token\") pod \"ingress-operator-5b745b69d9-kvphq\" (UID: \"82c02f94-72c8-43fd-b0f0-7c644d696b61\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.131937 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/82c02f94-72c8-43fd-b0f0-7c644d696b61-trusted-ca\") pod \"ingress-operator-5b745b69d9-kvphq\" (UID: \"82c02f94-72c8-43fd-b0f0-7c644d696b61\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.132038 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.132086 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/46b82026-d586-40b2-ad5b-fc08674d7067-trusted-ca\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.132113 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8eb33408-9530-4b30-a357-9ecd1a094606-srv-cert\") pod \"olm-operator-6b444d44fb-c5jnv\" (UID: \"8eb33408-9530-4b30-a357-9ecd1a094606\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.132149 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2143d413-3556-4232-a3d6-145f94d98606-trusted-ca\") pod \"console-operator-58897d9998-lr8cn\" (UID: \"2143d413-3556-4232-a3d6-145f94d98606\") " pod="openshift-console-operator/console-operator-58897d9998-lr8cn" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.132217 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x87lh\" (UniqueName: \"kubernetes.io/projected/0aa6b49a-8078-44f4-b1a9-2542d5bad461-kube-api-access-x87lh\") pod \"control-plane-machine-set-operator-78cbb6b69f-jbtd9\" (UID: \"0aa6b49a-8078-44f4-b1a9-2542d5bad461\") " 
pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jbtd9" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.132277 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/f09051b7-ad38-4a84-89d7-ad84fd51fa1c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-922md\" (UID: \"f09051b7-ad38-4a84-89d7-ad84fd51fa1c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-922md" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.132366 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-audit-policies\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.132419 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrp77\" (UniqueName: \"kubernetes.io/projected/79a36fbf-eb6c-442f-b6f0-d4a5f7435dde-kube-api-access-qrp77\") pod \"downloads-7954f5f757-54gh9\" (UID: \"79a36fbf-eb6c-442f-b6f0-d4a5f7435dde\") " pod="openshift-console/downloads-7954f5f757-54gh9" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.132475 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.132532 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/54c1da50-0209-4285-bc15-91427e15241d-registration-dir\") pod \"csi-hostpathplugin-xvxqg\" (UID: \"54c1da50-0209-4285-bc15-91427e15241d\") " pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.132626 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-audit-policies\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.132690 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/82c02f94-72c8-43fd-b0f0-7c644d696b61-metrics-tls\") pod \"ingress-operator-5b745b69d9-kvphq\" (UID: \"82c02f94-72c8-43fd-b0f0-7c644d696b61\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.132745 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnfmt\" (UniqueName: \"kubernetes.io/projected/b65cc4af-8099-4f34-a93e-9ef9ae97996b-kube-api-access-hnfmt\") pod \"migrator-59844c95c7-s4474\" (UID: \"b65cc4af-8099-4f34-a93e-9ef9ae97996b\") " 
pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-s4474" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.132795 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ee856066-85d4-41a5-86eb-9ab5e01ca0c6-metrics-tls\") pod \"dns-default-slprg\" (UID: \"ee856066-85d4-41a5-86eb-9ab5e01ca0c6\") " pod="openshift-dns/dns-default-slprg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.132855 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/02b54db7-da24-4fc2-88e2-cac233ca6323-signing-key\") pod \"service-ca-9c57cc56f-7bmls\" (UID: \"02b54db7-da24-4fc2-88e2-cac233ca6323\") " pod="openshift-service-ca/service-ca-9c57cc56f-7bmls" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.132908 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gm6w\" (UniqueName: \"kubernetes.io/projected/02b54db7-da24-4fc2-88e2-cac233ca6323-kube-api-access-7gm6w\") pod \"service-ca-9c57cc56f-7bmls\" (UID: \"02b54db7-da24-4fc2-88e2-cac233ca6323\") " pod="openshift-service-ca/service-ca-9c57cc56f-7bmls" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.133358 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/08663495-9331-4b6f-b82a-67b308a9afa3-oauth-serving-cert\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: E0128 12:22:54.133641 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:54.633614507 +0000 UTC m=+125.721028352 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.135953 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-encryption-config\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.136144 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2143d413-3556-4232-a3d6-145f94d98606-serving-cert\") pod \"console-operator-58897d9998-lr8cn\" (UID: \"2143d413-3556-4232-a3d6-145f94d98606\") " pod="openshift-console-operator/console-operator-58897d9998-lr8cn" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.136787 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d97a0072-f1ae-4674-8ad6-aab9c60155f3-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-4x7zt\" (UID: \"d97a0072-f1ae-4674-8ad6-aab9c60155f3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4x7zt" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.137783 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df73faab-d681-4ffe-b550-331fa336bc10-config\") pod \"authentication-operator-69f744f599-dd6rs\" (UID: \"df73faab-d681-4ffe-b550-331fa336bc10\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dd6rs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.138743 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2143d413-3556-4232-a3d6-145f94d98606-config\") pod \"console-operator-58897d9998-lr8cn\" (UID: \"2143d413-3556-4232-a3d6-145f94d98606\") " pod="openshift-console-operator/console-operator-58897d9998-lr8cn" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.139152 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-audit-policies\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.139295 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a7ad2819-235d-4f6d-b731-6f8e31db0b13-images\") pod \"machine-api-operator-5694c8668f-xcd5g\" (UID: \"a7ad2819-235d-4f6d-b731-6f8e31db0b13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xcd5g" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.140433 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/45dd6f4b-7432-459c-ba00-b849e384eaae-trusted-ca\") pod 
\"cluster-image-registry-operator-dc59b4c8b-mwwhj\" (UID: \"45dd6f4b-7432-459c-ba00-b849e384eaae\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mwwhj" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.140561 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.140818 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/edb4a604-6b03-48d4-b5ab-09e266b5eef8-client-ca\") pod \"controller-manager-879f6c89f-7pxnw\" (UID: \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.141111 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hd752\" (UniqueName: \"kubernetes.io/projected/edb4a604-6b03-48d4-b5ab-09e266b5eef8-kube-api-access-hd752\") pod \"controller-manager-879f6c89f-7pxnw\" (UID: \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.141256 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b3a33bb2-4fb3-4988-8b43-62a9bdd2e659-certs\") pod \"machine-config-server-ftmbf\" (UID: \"b3a33bb2-4fb3-4988-8b43-62a9bdd2e659\") " pod="openshift-machine-config-operator/machine-config-server-ftmbf" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.141333 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tq58r\" (UniqueName: \"kubernetes.io/projected/a7ad2819-235d-4f6d-b731-6f8e31db0b13-kube-api-access-tq58r\") pod \"machine-api-operator-5694c8668f-xcd5g\" (UID: \"a7ad2819-235d-4f6d-b731-6f8e31db0b13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xcd5g" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.141428 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/af98b7ab-bf20-4cb2-85ec-1eb757b8db45-images\") pod \"machine-config-operator-74547568cd-xkxzp\" (UID: \"af98b7ab-bf20-4cb2-85ec-1eb757b8db45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.141346 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea3bd48a-bb7b-4642-a8e5-5ad2e71d31f9-config\") pod \"openshift-apiserver-operator-796bbdcf4f-xh8vq\" (UID: \"ea3bd48a-bb7b-4642-a8e5-5ad2e71d31f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xh8vq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.141541 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7ad2819-235d-4f6d-b731-6f8e31db0b13-config\") pod \"machine-api-operator-5694c8668f-xcd5g\" (UID: \"a7ad2819-235d-4f6d-b731-6f8e31db0b13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xcd5g" Jan 28 12:22:54 crc 
kubenswrapper[4685]: I0128 12:22:54.141633 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/08663495-9331-4b6f-b82a-67b308a9afa3-service-ca\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.141698 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4kwh\" (UniqueName: \"kubernetes.io/projected/4cd9e183-ef71-4bc0-af54-df333c728cc4-kube-api-access-n4kwh\") pod \"router-default-5444994796-xvw2d\" (UID: \"4cd9e183-ef71-4bc0-af54-df333c728cc4\") " pod="openshift-ingress/router-default-5444994796-xvw2d" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.141741 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.141755 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/46b82026-d586-40b2-ad5b-fc08674d7067-registry-tls\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.141790 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.141835 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/df73faab-d681-4ffe-b550-331fa336bc10-service-ca-bundle\") pod \"authentication-operator-69f744f599-dd6rs\" (UID: \"df73faab-d681-4ffe-b550-331fa336bc10\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dd6rs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.141870 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.141907 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rccz9\" (UniqueName: \"kubernetes.io/projected/af98b7ab-bf20-4cb2-85ec-1eb757b8db45-kube-api-access-rccz9\") pod \"machine-config-operator-74547568cd-xkxzp\" (UID: \"af98b7ab-bf20-4cb2-85ec-1eb757b8db45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.141964 
4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ee856066-85d4-41a5-86eb-9ab5e01ca0c6-config-volume\") pod \"dns-default-slprg\" (UID: \"ee856066-85d4-41a5-86eb-9ab5e01ca0c6\") " pod="openshift-dns/dns-default-slprg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.142095 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/df73faab-d681-4ffe-b550-331fa336bc10-serving-cert\") pod \"authentication-operator-69f744f599-dd6rs\" (UID: \"df73faab-d681-4ffe-b550-331fa336bc10\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dd6rs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.142483 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/46b82026-d586-40b2-ad5b-fc08674d7067-registry-certificates\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.142515 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.142615 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edb4a604-6b03-48d4-b5ab-09e266b5eef8-config\") pod \"controller-manager-879f6c89f-7pxnw\" (UID: \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.142818 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/80b3c037-4d1a-4be7-81d6-1c33f1801bd6-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-kfqrq\" (UID: \"80b3c037-4d1a-4be7-81d6-1c33f1801bd6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-kfqrq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.142933 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pckm\" (UniqueName: \"kubernetes.io/projected/82c02f94-72c8-43fd-b0f0-7c644d696b61-kube-api-access-7pckm\") pod \"ingress-operator-5b745b69d9-kvphq\" (UID: \"82c02f94-72c8-43fd-b0f0-7c644d696b61\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.143035 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/421ae539-a9a0-4a76-be82-23ea497077ad-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6z8gs\" (UID: \"421ae539-a9a0-4a76-be82-23ea497077ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6z8gs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.143130 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: 
\"kubernetes.io/configmap/02b54db7-da24-4fc2-88e2-cac233ca6323-signing-cabundle\") pod \"service-ca-9c57cc56f-7bmls\" (UID: \"02b54db7-da24-4fc2-88e2-cac233ca6323\") " pod="openshift-service-ca/service-ca-9c57cc56f-7bmls" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.142989 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/df73faab-d681-4ffe-b550-331fa336bc10-service-ca-bundle\") pod \"authentication-operator-69f744f599-dd6rs\" (UID: \"df73faab-d681-4ffe-b550-331fa336bc10\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dd6rs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.142974 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/edb4a604-6b03-48d4-b5ab-09e266b5eef8-client-ca\") pod \"controller-manager-879f6c89f-7pxnw\" (UID: \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.143393 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/46b82026-d586-40b2-ad5b-fc08674d7067-installation-pull-secrets\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.143499 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-audit-dir\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.143563 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/08663495-9331-4b6f-b82a-67b308a9afa3-service-ca\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.143592 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ea3bd48a-bb7b-4642-a8e5-5ad2e71d31f9-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-xh8vq\" (UID: \"ea3bd48a-bb7b-4642-a8e5-5ad2e71d31f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xh8vq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.143715 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3fdafa55-db38-4041-bb34-f319905ad733-profile-collector-cert\") pod \"catalog-operator-68c6474976-6wksk\" (UID: \"3fdafa55-db38-4041-bb34-f319905ad733\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.143772 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/af98b7ab-bf20-4cb2-85ec-1eb757b8db45-proxy-tls\") pod \"machine-config-operator-74547568cd-xkxzp\" (UID: \"af98b7ab-bf20-4cb2-85ec-1eb757b8db45\") " 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.143821 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/08663495-9331-4b6f-b82a-67b308a9afa3-console-oauth-config\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.143902 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55j98\" (UniqueName: \"kubernetes.io/projected/ea3bd48a-bb7b-4642-a8e5-5ad2e71d31f9-kube-api-access-55j98\") pod \"openshift-apiserver-operator-796bbdcf4f-xh8vq\" (UID: \"ea3bd48a-bb7b-4642-a8e5-5ad2e71d31f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xh8vq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.143952 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-audit-dir\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.144053 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/021e1574-af09-49bb-af67-1fc874bb0a06-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-lw2hz\" (UID: \"021e1574-af09-49bb-af67-1fc874bb0a06\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lw2hz" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.144118 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-psh57" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.145366 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-audit-dir\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.144116 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9xtj\" (UniqueName: \"kubernetes.io/projected/3fdafa55-db38-4041-bb34-f319905ad733-kube-api-access-j9xtj\") pod \"catalog-operator-68c6474976-6wksk\" (UID: \"3fdafa55-db38-4041-bb34-f319905ad733\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.145537 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/45dd6f4b-7432-459c-ba00-b849e384eaae-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-mwwhj\" (UID: \"45dd6f4b-7432-459c-ba00-b849e384eaae\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mwwhj" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.145595 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/08663495-9331-4b6f-b82a-67b308a9afa3-console-serving-cert\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.145643 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/08663495-9331-4b6f-b82a-67b308a9afa3-console-config\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.145688 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7h8g\" (UniqueName: \"kubernetes.io/projected/df73faab-d681-4ffe-b550-331fa336bc10-kube-api-access-l7h8g\") pod \"authentication-operator-69f744f599-dd6rs\" (UID: \"df73faab-d681-4ffe-b550-331fa336bc10\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dd6rs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.145733 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrdhj\" (UniqueName: \"kubernetes.io/projected/021e1574-af09-49bb-af67-1fc874bb0a06-kube-api-access-vrdhj\") pod \"multus-admission-controller-857f4d67dd-lw2hz\" (UID: \"021e1574-af09-49bb-af67-1fc874bb0a06\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lw2hz" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.145775 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4cd9e183-ef71-4bc0-af54-df333c728cc4-service-ca-bundle\") pod \"router-default-5444994796-xvw2d\" (UID: \"4cd9e183-ef71-4bc0-af54-df333c728cc4\") " pod="openshift-ingress/router-default-5444994796-xvw2d" Jan 28 
12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.145814 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/384b638e-e264-4492-8250-95540a24916c-config-volume\") pod \"collect-profiles-29493375-d96dp\" (UID: \"384b638e-e264-4492-8250-95540a24916c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.145851 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/39f4fa09-765f-45c8-8206-290bf19fab29-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-vcrmr\" (UID: \"39f4fa09-765f-45c8-8206-290bf19fab29\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vcrmr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.145911 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4aea2580-1e20-4df6-b328-c1b99d986330-config\") pod \"service-ca-operator-777779d784-pvrr6\" (UID: \"4aea2580-1e20-4df6-b328-c1b99d986330\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-pvrr6" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.145948 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-etcd-client\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.145986 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-config\") pod \"route-controller-manager-6576b87f9c-2966r\" (UID: \"6d7a1a22-14a6-419e-b4f1-ebf636f248f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.146024 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/edb4a604-6b03-48d4-b5ab-09e266b5eef8-serving-cert\") pod \"controller-manager-879f6c89f-7pxnw\" (UID: \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.146059 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/421ae539-a9a0-4a76-be82-23ea497077ad-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6z8gs\" (UID: \"421ae539-a9a0-4a76-be82-23ea497077ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6z8gs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.146103 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlkkd\" (UniqueName: \"kubernetes.io/projected/08663495-9331-4b6f-b82a-67b308a9afa3-kube-api-access-dlkkd\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.146140 4685 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/384b638e-e264-4492-8250-95540a24916c-secret-volume\") pod \"collect-profiles-29493375-d96dp\" (UID: \"384b638e-e264-4492-8250-95540a24916c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.146211 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/54c1da50-0209-4285-bc15-91427e15241d-plugins-dir\") pod \"csi-hostpathplugin-xvxqg\" (UID: \"54c1da50-0209-4285-bc15-91427e15241d\") " pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.146253 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8p76\" (UniqueName: \"kubernetes.io/projected/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-kube-api-access-v8p76\") pod \"route-controller-manager-6576b87f9c-2966r\" (UID: \"6d7a1a22-14a6-419e-b4f1-ebf636f248f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.146290 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/45dd6f4b-7432-459c-ba00-b849e384eaae-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-mwwhj\" (UID: \"45dd6f4b-7432-459c-ba00-b849e384eaae\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mwwhj" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.146327 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/08663495-9331-4b6f-b82a-67b308a9afa3-trusted-ca-bundle\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.146337 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edb4a604-6b03-48d4-b5ab-09e266b5eef8-config\") pod \"controller-manager-879f6c89f-7pxnw\" (UID: \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.146370 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/75a32638-e479-4025-abd5-d00533347443-serving-cert\") pod \"openshift-config-operator-7777fb866f-dwzsq\" (UID: \"75a32638-e479-4025-abd5-d00533347443\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.146431 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-serving-cert\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.146473 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/25efaeca-af03-4a53-9955-993273376049-cert\") pod \"ingress-canary-gjp8v\" (UID: 
\"25efaeca-af03-4a53-9955-993273376049\") " pod="openshift-ingress-canary/ingress-canary-gjp8v" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.146510 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6d48\" (UniqueName: \"kubernetes.io/projected/eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20-kube-api-access-j6d48\") pod \"packageserver-d55dfcdfc-h6gqk\" (UID: \"eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.146550 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxsgk\" (UniqueName: \"kubernetes.io/projected/46b82026-d586-40b2-ad5b-fc08674d7067-kube-api-access-wxsgk\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.146600 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kcrl6\" (UniqueName: \"kubernetes.io/projected/75a32638-e479-4025-abd5-d00533347443-kube-api-access-kcrl6\") pod \"openshift-config-operator-7777fb866f-dwzsq\" (UID: \"75a32638-e479-4025-abd5-d00533347443\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.146642 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbjdb\" (UniqueName: \"kubernetes.io/projected/384b638e-e264-4492-8250-95540a24916c-kube-api-access-bbjdb\") pod \"collect-profiles-29493375-d96dp\" (UID: \"384b638e-e264-4492-8250-95540a24916c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.146795 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/df73faab-d681-4ffe-b550-331fa336bc10-serving-cert\") pod \"authentication-operator-69f744f599-dd6rs\" (UID: \"df73faab-d681-4ffe-b550-331fa336bc10\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dd6rs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.148120 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/a7ad2819-235d-4f6d-b731-6f8e31db0b13-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-xcd5g\" (UID: \"a7ad2819-235d-4f6d-b731-6f8e31db0b13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xcd5g" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.148162 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/08663495-9331-4b6f-b82a-67b308a9afa3-console-config\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.149800 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/08663495-9331-4b6f-b82a-67b308a9afa3-trusted-ca-bundle\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.150144 4685 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ea3bd48a-bb7b-4642-a8e5-5ad2e71d31f9-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-xh8vq\" (UID: \"ea3bd48a-bb7b-4642-a8e5-5ad2e71d31f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xh8vq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.150537 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4x7zt" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.152233 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/edb4a604-6b03-48d4-b5ab-09e266b5eef8-serving-cert\") pod \"controller-manager-879f6c89f-7pxnw\" (UID: \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.152348 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/08663495-9331-4b6f-b82a-67b308a9afa3-console-serving-cert\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.152443 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-serving-cert\") pod \"route-controller-manager-6576b87f9c-2966r\" (UID: \"6d7a1a22-14a6-419e-b4f1-ebf636f248f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.153018 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/46b82026-d586-40b2-ad5b-fc08674d7067-installation-pull-secrets\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.155103 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/75a32638-e479-4025-abd5-d00533347443-serving-cert\") pod \"openshift-config-operator-7777fb866f-dwzsq\" (UID: \"75a32638-e479-4025-abd5-d00533347443\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.156468 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-etcd-client\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.156956 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/08663495-9331-4b6f-b82a-67b308a9afa3-console-oauth-config\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.163567 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-serving-cert\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.164046 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcnds\" (UniqueName: \"kubernetes.io/projected/31a31a5b-65f9-4997-b083-16382c0f6c11-kube-api-access-qcnds\") pod \"dns-operator-744455d44c-6rxh9\" (UID: \"31a31a5b-65f9-4997-b083-16382c0f6c11\") " pod="openshift-dns-operator/dns-operator-744455d44c-6rxh9" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.165288 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-config\") pod \"route-controller-manager-6576b87f9c-2966r\" (UID: \"6d7a1a22-14a6-419e-b4f1-ebf636f248f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.174836 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xm697\" (UniqueName: \"kubernetes.io/projected/9df99517-c3a0-4fde-a93f-24b84a33615a-kube-api-access-xm697\") pod \"etcd-operator-b45778765-68qg7\" (UID: \"9df99517-c3a0-4fde-a93f-24b84a33615a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.201939 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6f7d6\" (UniqueName: \"kubernetes.io/projected/b22a7729-d10a-412c-8df1-30992ba607b0-kube-api-access-6f7d6\") pod \"apiserver-76f77b778f-zp7xc\" (UID: \"b22a7729-d10a-412c-8df1-30992ba607b0\") " pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.205535 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.214459 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.221406 4685 request.go:700] Waited for 1.916912429s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-canary/secrets?fieldSelector=metadata.name%3Dcanary-serving-cert&limit=500&resourceVersion=0 Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.223796 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 28 12:22:54 crc kubenswrapper[4685]: W0128 12:22:54.227261 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91fd9e56_9836_4427_b58e_9c0742895c7a.slice/crio-922b722f2166d4b37cc80ebb053ccbb65ee0e576dbf0d722e2778208109b6d1c WatchSource:0}: Error finding container 922b722f2166d4b37cc80ebb053ccbb65ee0e576dbf0d722e2778208109b6d1c: Status 404 returned error can't find the container with id 922b722f2166d4b37cc80ebb053ccbb65ee0e576dbf0d722e2778208109b6d1c Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.243040 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.247577 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:54 crc kubenswrapper[4685]: E0128 12:22:54.247697 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:54.747666893 +0000 UTC m=+125.835080748 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.247886 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkg7l\" (UniqueName: \"kubernetes.io/projected/68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45-kube-api-access-xkg7l\") pod \"marketplace-operator-79b997595-qpc29\" (UID: \"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45\") " pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.247941 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srmxx\" (UniqueName: \"kubernetes.io/projected/cc7e3a89-0688-4bda-a76c-1b03e21b8419-kube-api-access-srmxx\") pod \"kube-storage-version-migrator-operator-b67b599dd-hbxst\" (UID: \"cc7e3a89-0688-4bda-a76c-1b03e21b8419\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hbxst" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.247978 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20-apiservice-cert\") pod \"packageserver-d55dfcdfc-h6gqk\" (UID: \"eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.248004 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xq58s\" (UniqueName: \"kubernetes.io/projected/ee856066-85d4-41a5-86eb-9ab5e01ca0c6-kube-api-access-xq58s\") pod \"dns-default-slprg\" (UID: \"ee856066-85d4-41a5-86eb-9ab5e01ca0c6\") " pod="openshift-dns/dns-default-slprg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.248236 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/4cd9e183-ef71-4bc0-af54-df333c728cc4-default-certificate\") pod \"router-default-5444994796-xvw2d\" (UID: \"4cd9e183-ef71-4bc0-af54-df333c728cc4\") " pod="openshift-ingress/router-default-5444994796-xvw2d" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.248275 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qpc29\" (UID: \"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45\") " pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.248797 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sr2k7\" (UniqueName: \"kubernetes.io/projected/54c1da50-0209-4285-bc15-91427e15241d-kube-api-access-sr2k7\") pod \"csi-hostpathplugin-xvxqg\" (UID: \"54c1da50-0209-4285-bc15-91427e15241d\") " pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.248823 4685 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/82c02f94-72c8-43fd-b0f0-7c644d696b61-bound-sa-token\") pod \"ingress-operator-5b745b69d9-kvphq\" (UID: \"82c02f94-72c8-43fd-b0f0-7c644d696b61\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.248978 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.249024 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/82c02f94-72c8-43fd-b0f0-7c644d696b61-trusted-ca\") pod \"ingress-operator-5b745b69d9-kvphq\" (UID: \"82c02f94-72c8-43fd-b0f0-7c644d696b61\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.249614 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/f09051b7-ad38-4a84-89d7-ad84fd51fa1c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-922md\" (UID: \"f09051b7-ad38-4a84-89d7-ad84fd51fa1c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-922md" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.249660 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8eb33408-9530-4b30-a357-9ecd1a094606-srv-cert\") pod \"olm-operator-6b444d44fb-c5jnv\" (UID: \"8eb33408-9530-4b30-a357-9ecd1a094606\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.249685 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x87lh\" (UniqueName: \"kubernetes.io/projected/0aa6b49a-8078-44f4-b1a9-2542d5bad461-kube-api-access-x87lh\") pod \"control-plane-machine-set-operator-78cbb6b69f-jbtd9\" (UID: \"0aa6b49a-8078-44f4-b1a9-2542d5bad461\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jbtd9" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.249715 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.249740 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/54c1da50-0209-4285-bc15-91427e15241d-registration-dir\") pod \"csi-hostpathplugin-xvxqg\" (UID: \"54c1da50-0209-4285-bc15-91427e15241d\") " pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.249762 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gm6w\" 
(UniqueName: \"kubernetes.io/projected/02b54db7-da24-4fc2-88e2-cac233ca6323-kube-api-access-7gm6w\") pod \"service-ca-9c57cc56f-7bmls\" (UID: \"02b54db7-da24-4fc2-88e2-cac233ca6323\") " pod="openshift-service-ca/service-ca-9c57cc56f-7bmls" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.249784 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-audit-policies\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.249804 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/82c02f94-72c8-43fd-b0f0-7c644d696b61-metrics-tls\") pod \"ingress-operator-5b745b69d9-kvphq\" (UID: \"82c02f94-72c8-43fd-b0f0-7c644d696b61\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.249826 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnfmt\" (UniqueName: \"kubernetes.io/projected/b65cc4af-8099-4f34-a93e-9ef9ae97996b-kube-api-access-hnfmt\") pod \"migrator-59844c95c7-s4474\" (UID: \"b65cc4af-8099-4f34-a93e-9ef9ae97996b\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-s4474" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.249846 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ee856066-85d4-41a5-86eb-9ab5e01ca0c6-metrics-tls\") pod \"dns-default-slprg\" (UID: \"ee856066-85d4-41a5-86eb-9ab5e01ca0c6\") " pod="openshift-dns/dns-default-slprg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.249868 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/02b54db7-da24-4fc2-88e2-cac233ca6323-signing-key\") pod \"service-ca-9c57cc56f-7bmls\" (UID: \"02b54db7-da24-4fc2-88e2-cac233ca6323\") " pod="openshift-service-ca/service-ca-9c57cc56f-7bmls" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.249931 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b3a33bb2-4fb3-4988-8b43-62a9bdd2e659-certs\") pod \"machine-config-server-ftmbf\" (UID: \"b3a33bb2-4fb3-4988-8b43-62a9bdd2e659\") " pod="openshift-machine-config-operator/machine-config-server-ftmbf" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.249962 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/af98b7ab-bf20-4cb2-85ec-1eb757b8db45-images\") pod \"machine-config-operator-74547568cd-xkxzp\" (UID: \"af98b7ab-bf20-4cb2-85ec-1eb757b8db45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.249990 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4kwh\" (UniqueName: \"kubernetes.io/projected/4cd9e183-ef71-4bc0-af54-df333c728cc4-kube-api-access-n4kwh\") pod \"router-default-5444994796-xvw2d\" (UID: \"4cd9e183-ef71-4bc0-af54-df333c728cc4\") " pod="openshift-ingress/router-default-5444994796-xvw2d" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.250016 4685 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.250671 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/54c1da50-0209-4285-bc15-91427e15241d-registration-dir\") pod \"csi-hostpathplugin-xvxqg\" (UID: \"54c1da50-0209-4285-bc15-91427e15241d\") " pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.250738 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.250784 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.250818 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rccz9\" (UniqueName: \"kubernetes.io/projected/af98b7ab-bf20-4cb2-85ec-1eb757b8db45-kube-api-access-rccz9\") pod \"machine-config-operator-74547568cd-xkxzp\" (UID: \"af98b7ab-bf20-4cb2-85ec-1eb757b8db45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.250852 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ee856066-85d4-41a5-86eb-9ab5e01ca0c6-config-volume\") pod \"dns-default-slprg\" (UID: \"ee856066-85d4-41a5-86eb-9ab5e01ca0c6\") " pod="openshift-dns/dns-default-slprg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.250878 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/02b54db7-da24-4fc2-88e2-cac233ca6323-signing-cabundle\") pod \"service-ca-9c57cc56f-7bmls\" (UID: \"02b54db7-da24-4fc2-88e2-cac233ca6323\") " pod="openshift-service-ca/service-ca-9c57cc56f-7bmls" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.250914 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/80b3c037-4d1a-4be7-81d6-1c33f1801bd6-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-kfqrq\" (UID: \"80b3c037-4d1a-4be7-81d6-1c33f1801bd6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-kfqrq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.250948 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pckm\" (UniqueName: 
\"kubernetes.io/projected/82c02f94-72c8-43fd-b0f0-7c644d696b61-kube-api-access-7pckm\") pod \"ingress-operator-5b745b69d9-kvphq\" (UID: \"82c02f94-72c8-43fd-b0f0-7c644d696b61\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.250988 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/421ae539-a9a0-4a76-be82-23ea497077ad-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6z8gs\" (UID: \"421ae539-a9a0-4a76-be82-23ea497077ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6z8gs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251020 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3fdafa55-db38-4041-bb34-f319905ad733-profile-collector-cert\") pod \"catalog-operator-68c6474976-6wksk\" (UID: \"3fdafa55-db38-4041-bb34-f319905ad733\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251046 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/af98b7ab-bf20-4cb2-85ec-1eb757b8db45-proxy-tls\") pod \"machine-config-operator-74547568cd-xkxzp\" (UID: \"af98b7ab-bf20-4cb2-85ec-1eb757b8db45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251120 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-audit-dir\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251184 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/021e1574-af09-49bb-af67-1fc874bb0a06-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-lw2hz\" (UID: \"021e1574-af09-49bb-af67-1fc874bb0a06\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lw2hz" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251217 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9xtj\" (UniqueName: \"kubernetes.io/projected/3fdafa55-db38-4041-bb34-f319905ad733-kube-api-access-j9xtj\") pod \"catalog-operator-68c6474976-6wksk\" (UID: \"3fdafa55-db38-4041-bb34-f319905ad733\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251242 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/39f4fa09-765f-45c8-8206-290bf19fab29-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-vcrmr\" (UID: \"39f4fa09-765f-45c8-8206-290bf19fab29\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vcrmr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251288 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrdhj\" (UniqueName: \"kubernetes.io/projected/021e1574-af09-49bb-af67-1fc874bb0a06-kube-api-access-vrdhj\") pod 
\"multus-admission-controller-857f4d67dd-lw2hz\" (UID: \"021e1574-af09-49bb-af67-1fc874bb0a06\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lw2hz" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251314 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4cd9e183-ef71-4bc0-af54-df333c728cc4-service-ca-bundle\") pod \"router-default-5444994796-xvw2d\" (UID: \"4cd9e183-ef71-4bc0-af54-df333c728cc4\") " pod="openshift-ingress/router-default-5444994796-xvw2d" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251348 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/384b638e-e264-4492-8250-95540a24916c-config-volume\") pod \"collect-profiles-29493375-d96dp\" (UID: \"384b638e-e264-4492-8250-95540a24916c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251378 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4aea2580-1e20-4df6-b328-c1b99d986330-config\") pod \"service-ca-operator-777779d784-pvrr6\" (UID: \"4aea2580-1e20-4df6-b328-c1b99d986330\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-pvrr6" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251410 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/421ae539-a9a0-4a76-be82-23ea497077ad-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6z8gs\" (UID: \"421ae539-a9a0-4a76-be82-23ea497077ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6z8gs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251449 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/384b638e-e264-4492-8250-95540a24916c-secret-volume\") pod \"collect-profiles-29493375-d96dp\" (UID: \"384b638e-e264-4492-8250-95540a24916c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251481 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/54c1da50-0209-4285-bc15-91427e15241d-plugins-dir\") pod \"csi-hostpathplugin-xvxqg\" (UID: \"54c1da50-0209-4285-bc15-91427e15241d\") " pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251553 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/25efaeca-af03-4a53-9955-993273376049-cert\") pod \"ingress-canary-gjp8v\" (UID: \"25efaeca-af03-4a53-9955-993273376049\") " pod="openshift-ingress-canary/ingress-canary-gjp8v" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251583 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6d48\" (UniqueName: \"kubernetes.io/projected/eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20-kube-api-access-j6d48\") pod \"packageserver-d55dfcdfc-h6gqk\" (UID: \"eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251642 4685 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-bbjdb\" (UniqueName: \"kubernetes.io/projected/384b638e-e264-4492-8250-95540a24916c-kube-api-access-bbjdb\") pod \"collect-profiles-29493375-d96dp\" (UID: \"384b638e-e264-4492-8250-95540a24916c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251665 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b3a33bb2-4fb3-4988-8b43-62a9bdd2e659-node-bootstrap-token\") pod \"machine-config-server-ftmbf\" (UID: \"b3a33bb2-4fb3-4988-8b43-62a9bdd2e659\") " pod="openshift-machine-config-operator/machine-config-server-ftmbf" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251695 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251722 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3fdafa55-db38-4041-bb34-f319905ad733-srv-cert\") pod \"catalog-operator-68c6474976-6wksk\" (UID: \"3fdafa55-db38-4041-bb34-f319905ad733\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251748 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39f4fa09-765f-45c8-8206-290bf19fab29-config\") pod \"kube-apiserver-operator-766d6c64bb-vcrmr\" (UID: \"39f4fa09-765f-45c8-8206-290bf19fab29\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vcrmr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251774 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251803 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/54c1da50-0209-4285-bc15-91427e15241d-socket-dir\") pod \"csi-hostpathplugin-xvxqg\" (UID: \"54c1da50-0209-4285-bc15-91427e15241d\") " pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251832 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4cd9e183-ef71-4bc0-af54-df333c728cc4-metrics-certs\") pod \"router-default-5444994796-xvw2d\" (UID: \"4cd9e183-ef71-4bc0-af54-df333c728cc4\") " pod="openshift-ingress/router-default-5444994796-xvw2d" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251859 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftnl2\" (UniqueName: \"kubernetes.io/projected/f09051b7-ad38-4a84-89d7-ad84fd51fa1c-kube-api-access-ftnl2\") pod 
\"package-server-manager-789f6589d5-922md\" (UID: \"f09051b7-ad38-4a84-89d7-ad84fd51fa1c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-922md" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251886 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251583 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/82c02f94-72c8-43fd-b0f0-7c644d696b61-trusted-ca\") pod \"ingress-operator-5b745b69d9-kvphq\" (UID: \"82c02f94-72c8-43fd-b0f0-7c644d696b61\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251930 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc7e3a89-0688-4bda-a76c-1b03e21b8419-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-hbxst\" (UID: \"cc7e3a89-0688-4bda-a76c-1b03e21b8419\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hbxst" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.251959 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvznn\" (UniqueName: \"kubernetes.io/projected/8eb33408-9530-4b30-a357-9ecd1a094606-kube-api-access-dvznn\") pod \"olm-operator-6b444d44fb-c5jnv\" (UID: \"8eb33408-9530-4b30-a357-9ecd1a094606\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.252223 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8zcgc\" (UniqueName: \"kubernetes.io/projected/b3a33bb2-4fb3-4988-8b43-62a9bdd2e659-kube-api-access-8zcgc\") pod \"machine-config-server-ftmbf\" (UID: \"b3a33bb2-4fb3-4988-8b43-62a9bdd2e659\") " pod="openshift-machine-config-operator/machine-config-server-ftmbf" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.252282 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.252328 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/54c1da50-0209-4285-bc15-91427e15241d-csi-data-dir\") pod \"csi-hostpathplugin-xvxqg\" (UID: \"54c1da50-0209-4285-bc15-91427e15241d\") " pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.252403 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hl5x2\" (UniqueName: \"kubernetes.io/projected/25efaeca-af03-4a53-9955-993273376049-kube-api-access-hl5x2\") pod \"ingress-canary-gjp8v\" (UID: \"25efaeca-af03-4a53-9955-993273376049\") " 
pod="openshift-ingress-canary/ingress-canary-gjp8v" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.252477 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/4cd9e183-ef71-4bc0-af54-df333c728cc4-stats-auth\") pod \"router-default-5444994796-xvw2d\" (UID: \"4cd9e183-ef71-4bc0-af54-df333c728cc4\") " pod="openshift-ingress/router-default-5444994796-xvw2d" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.252527 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.252571 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39f4fa09-765f-45c8-8206-290bf19fab29-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-vcrmr\" (UID: \"39f4fa09-765f-45c8-8206-290bf19fab29\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vcrmr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.252629 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7v4x8\" (UniqueName: \"kubernetes.io/projected/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-kube-api-access-7v4x8\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.252692 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.252745 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.252785 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/af98b7ab-bf20-4cb2-85ec-1eb757b8db45-auth-proxy-config\") pod \"machine-config-operator-74547568cd-xkxzp\" (UID: \"af98b7ab-bf20-4cb2-85ec-1eb757b8db45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.252838 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/80b3c037-4d1a-4be7-81d6-1c33f1801bd6-proxy-tls\") pod \"machine-config-controller-84d6567774-kfqrq\" (UID: \"80b3c037-4d1a-4be7-81d6-1c33f1801bd6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-kfqrq" Jan 28 12:22:54 crc 
kubenswrapper[4685]: I0128 12:22:54.252889 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/0aa6b49a-8078-44f4-b1a9-2542d5bad461-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-jbtd9\" (UID: \"0aa6b49a-8078-44f4-b1a9-2542d5bad461\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jbtd9" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.252941 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8eb33408-9530-4b30-a357-9ecd1a094606-profile-collector-cert\") pod \"olm-operator-6b444d44fb-c5jnv\" (UID: \"8eb33408-9530-4b30-a357-9ecd1a094606\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.253011 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20-webhook-cert\") pod \"packageserver-d55dfcdfc-h6gqk\" (UID: \"eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.253092 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gl5k8\" (UniqueName: \"kubernetes.io/projected/80b3c037-4d1a-4be7-81d6-1c33f1801bd6-kube-api-access-gl5k8\") pod \"machine-config-controller-84d6567774-kfqrq\" (UID: \"80b3c037-4d1a-4be7-81d6-1c33f1801bd6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-kfqrq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.253134 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cc7e3a89-0688-4bda-a76c-1b03e21b8419-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-hbxst\" (UID: \"cc7e3a89-0688-4bda-a76c-1b03e21b8419\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hbxst" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.253204 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ee856066-85d4-41a5-86eb-9ab5e01ca0c6-metrics-tls\") pod \"dns-default-slprg\" (UID: \"ee856066-85d4-41a5-86eb-9ab5e01ca0c6\") " pod="openshift-dns/dns-default-slprg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.253222 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-qpc29\" (UID: \"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45\") " pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.253274 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/421ae539-a9a0-4a76-be82-23ea497077ad-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6z8gs\" (UID: \"421ae539-a9a0-4a76-be82-23ea497077ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6z8gs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.253324 
4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20-tmpfs\") pod \"packageserver-d55dfcdfc-h6gqk\" (UID: \"eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.253378 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4aea2580-1e20-4df6-b328-c1b99d986330-serving-cert\") pod \"service-ca-operator-777779d784-pvrr6\" (UID: \"4aea2580-1e20-4df6-b328-c1b99d986330\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-pvrr6" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.253436 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrdkn\" (UniqueName: \"kubernetes.io/projected/4aea2580-1e20-4df6-b328-c1b99d986330-kube-api-access-qrdkn\") pod \"service-ca-operator-777779d784-pvrr6\" (UID: \"4aea2580-1e20-4df6-b328-c1b99d986330\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-pvrr6" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.253499 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/54c1da50-0209-4285-bc15-91427e15241d-mountpoint-dir\") pod \"csi-hostpathplugin-xvxqg\" (UID: \"54c1da50-0209-4285-bc15-91427e15241d\") " pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.253817 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/54c1da50-0209-4285-bc15-91427e15241d-mountpoint-dir\") pod \"csi-hostpathplugin-xvxqg\" (UID: \"54c1da50-0209-4285-bc15-91427e15241d\") " pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.254039 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/384b638e-e264-4492-8250-95540a24916c-config-volume\") pod \"collect-profiles-29493375-d96dp\" (UID: \"384b638e-e264-4492-8250-95540a24916c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.254201 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/54c1da50-0209-4285-bc15-91427e15241d-csi-data-dir\") pod \"csi-hostpathplugin-xvxqg\" (UID: \"54c1da50-0209-4285-bc15-91427e15241d\") " pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.254589 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20-apiservice-cert\") pod \"packageserver-d55dfcdfc-h6gqk\" (UID: \"eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.254623 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qpc29\" (UID: \"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45\") " 
pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.256157 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b3a33bb2-4fb3-4988-8b43-62a9bdd2e659-certs\") pod \"machine-config-server-ftmbf\" (UID: \"b3a33bb2-4fb3-4988-8b43-62a9bdd2e659\") " pod="openshift-machine-config-operator/machine-config-server-ftmbf" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.257080 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/af98b7ab-bf20-4cb2-85ec-1eb757b8db45-images\") pod \"machine-config-operator-74547568cd-xkxzp\" (UID: \"af98b7ab-bf20-4cb2-85ec-1eb757b8db45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.257243 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/f09051b7-ad38-4a84-89d7-ad84fd51fa1c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-922md\" (UID: \"f09051b7-ad38-4a84-89d7-ad84fd51fa1c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-922md" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.257392 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4aea2580-1e20-4df6-b328-c1b99d986330-config\") pod \"service-ca-operator-777779d784-pvrr6\" (UID: \"4aea2580-1e20-4df6-b328-c1b99d986330\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-pvrr6" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.257908 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/421ae539-a9a0-4a76-be82-23ea497077ad-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6z8gs\" (UID: \"421ae539-a9a0-4a76-be82-23ea497077ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6z8gs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.258493 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20-webhook-cert\") pod \"packageserver-d55dfcdfc-h6gqk\" (UID: \"eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.258773 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/82c02f94-72c8-43fd-b0f0-7c644d696b61-metrics-tls\") pod \"ingress-operator-5b745b69d9-kvphq\" (UID: \"82c02f94-72c8-43fd-b0f0-7c644d696b61\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.259220 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20-tmpfs\") pod \"packageserver-d55dfcdfc-h6gqk\" (UID: \"eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.259640 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: 
\"kubernetes.io/secret/3fdafa55-db38-4041-bb34-f319905ad733-srv-cert\") pod \"catalog-operator-68c6474976-6wksk\" (UID: \"3fdafa55-db38-4041-bb34-f319905ad733\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.260540 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39f4fa09-765f-45c8-8206-290bf19fab29-config\") pod \"kube-apiserver-operator-766d6c64bb-vcrmr\" (UID: \"39f4fa09-765f-45c8-8206-290bf19fab29\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vcrmr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.261091 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/54c1da50-0209-4285-bc15-91427e15241d-socket-dir\") pod \"csi-hostpathplugin-xvxqg\" (UID: \"54c1da50-0209-4285-bc15-91427e15241d\") " pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.261201 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cc7e3a89-0688-4bda-a76c-1b03e21b8419-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-hbxst\" (UID: \"cc7e3a89-0688-4bda-a76c-1b03e21b8419\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hbxst" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.262611 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ee856066-85d4-41a5-86eb-9ab5e01ca0c6-config-volume\") pod \"dns-default-slprg\" (UID: \"ee856066-85d4-41a5-86eb-9ab5e01ca0c6\") " pod="openshift-dns/dns-default-slprg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.262770 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-qpc29\" (UID: \"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45\") " pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.262812 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/02b54db7-da24-4fc2-88e2-cac233ca6323-signing-cabundle\") pod \"service-ca-9c57cc56f-7bmls\" (UID: \"02b54db7-da24-4fc2-88e2-cac233ca6323\") " pod="openshift-service-ca/service-ca-9c57cc56f-7bmls" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.262916 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-audit-dir\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.263383 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/02b54db7-da24-4fc2-88e2-cac233ca6323-signing-key\") pod \"service-ca-9c57cc56f-7bmls\" (UID: \"02b54db7-da24-4fc2-88e2-cac233ca6323\") " pod="openshift-service-ca/service-ca-9c57cc56f-7bmls" Jan 28 12:22:54 crc kubenswrapper[4685]: E0128 12:22:54.264066 4685 nestedpendingoperations.go:348] Operation 
for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:54.764040023 +0000 UTC m=+125.851453868 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.264086 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/25efaeca-af03-4a53-9955-993273376049-cert\") pod \"ingress-canary-gjp8v\" (UID: \"25efaeca-af03-4a53-9955-993273376049\") " pod="openshift-ingress-canary/ingress-canary-gjp8v" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.264424 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/80b3c037-4d1a-4be7-81d6-1c33f1801bd6-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-kfqrq\" (UID: \"80b3c037-4d1a-4be7-81d6-1c33f1801bd6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-kfqrq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.264823 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/af98b7ab-bf20-4cb2-85ec-1eb757b8db45-auth-proxy-config\") pod \"machine-config-operator-74547568cd-xkxzp\" (UID: \"af98b7ab-bf20-4cb2-85ec-1eb757b8db45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.264889 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8eb33408-9530-4b30-a357-9ecd1a094606-profile-collector-cert\") pod \"olm-operator-6b444d44fb-c5jnv\" (UID: \"8eb33408-9530-4b30-a357-9ecd1a094606\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.265124 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/54c1da50-0209-4285-bc15-91427e15241d-plugins-dir\") pod \"csi-hostpathplugin-xvxqg\" (UID: \"54c1da50-0209-4285-bc15-91427e15241d\") " pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.265551 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/4cd9e183-ef71-4bc0-af54-df333c728cc4-default-certificate\") pod \"router-default-5444994796-xvw2d\" (UID: \"4cd9e183-ef71-4bc0-af54-df333c728cc4\") " pod="openshift-ingress/router-default-5444994796-xvw2d" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.265957 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4cd9e183-ef71-4bc0-af54-df333c728cc4-metrics-certs\") pod \"router-default-5444994796-xvw2d\" (UID: \"4cd9e183-ef71-4bc0-af54-df333c728cc4\") " pod="openshift-ingress/router-default-5444994796-xvw2d" Jan 28 12:22:54 
crc kubenswrapper[4685]: I0128 12:22:54.266192 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b3a33bb2-4fb3-4988-8b43-62a9bdd2e659-node-bootstrap-token\") pod \"machine-config-server-ftmbf\" (UID: \"b3a33bb2-4fb3-4988-8b43-62a9bdd2e659\") " pod="openshift-machine-config-operator/machine-config-server-ftmbf" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.266569 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/421ae539-a9a0-4a76-be82-23ea497077ad-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6z8gs\" (UID: \"421ae539-a9a0-4a76-be82-23ea497077ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6z8gs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.266587 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/80b3c037-4d1a-4be7-81d6-1c33f1801bd6-proxy-tls\") pod \"machine-config-controller-84d6567774-kfqrq\" (UID: \"80b3c037-4d1a-4be7-81d6-1c33f1801bd6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-kfqrq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.267745 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.268013 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8eb33408-9530-4b30-a357-9ecd1a094606-srv-cert\") pod \"olm-operator-6b444d44fb-c5jnv\" (UID: \"8eb33408-9530-4b30-a357-9ecd1a094606\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.268309 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/0aa6b49a-8078-44f4-b1a9-2542d5bad461-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-jbtd9\" (UID: \"0aa6b49a-8078-44f4-b1a9-2542d5bad461\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jbtd9" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.270306 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3fdafa55-db38-4041-bb34-f319905ad733-profile-collector-cert\") pod \"catalog-operator-68c6474976-6wksk\" (UID: \"3fdafa55-db38-4041-bb34-f319905ad733\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.271075 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/4cd9e183-ef71-4bc0-af54-df333c728cc4-stats-auth\") pod \"router-default-5444994796-xvw2d\" (UID: \"4cd9e183-ef71-4bc0-af54-df333c728cc4\") " pod="openshift-ingress/router-default-5444994796-xvw2d" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.271415 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/af98b7ab-bf20-4cb2-85ec-1eb757b8db45-proxy-tls\") pod \"machine-config-operator-74547568cd-xkxzp\" (UID: \"af98b7ab-bf20-4cb2-85ec-1eb757b8db45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp" 
Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.273668 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/021e1574-af09-49bb-af67-1fc874bb0a06-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-lw2hz\" (UID: \"021e1574-af09-49bb-af67-1fc874bb0a06\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lw2hz" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.274263 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39f4fa09-765f-45c8-8206-290bf19fab29-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-vcrmr\" (UID: \"39f4fa09-765f-45c8-8206-290bf19fab29\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vcrmr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.274643 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/384b638e-e264-4492-8250-95540a24916c-secret-volume\") pod \"collect-profiles-29493375-d96dp\" (UID: \"384b638e-e264-4492-8250-95540a24916c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.279527 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4aea2580-1e20-4df6-b328-c1b99d986330-serving-cert\") pod \"service-ca-operator-777779d784-pvrr6\" (UID: \"4aea2580-1e20-4df6-b328-c1b99d986330\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-pvrr6" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.279951 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4cd9e183-ef71-4bc0-af54-df333c728cc4-service-ca-bundle\") pod \"router-default-5444994796-xvw2d\" (UID: \"4cd9e183-ef71-4bc0-af54-df333c728cc4\") " pod="openshift-ingress/router-default-5444994796-xvw2d" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.280789 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.281112 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-audit-policies\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.281838 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.281899 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.283454 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.289544 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc7e3a89-0688-4bda-a76c-1b03e21b8419-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-hbxst\" (UID: \"cc7e3a89-0688-4bda-a76c-1b03e21b8419\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hbxst" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.303793 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.310004 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.311382 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.311631 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.311847 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.312430 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.312482 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-router-certs\") pod 
\"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.312526 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.313265 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.341318 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/46b82026-d586-40b2-ad5b-fc08674d7067-bound-sa-token\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.353387 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.354317 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:54 crc kubenswrapper[4685]: E0128 12:22:54.355768 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:54.855737859 +0000 UTC m=+125.943151734 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.370014 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8p68r\" (UniqueName: \"kubernetes.io/projected/efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9-kube-api-access-8p68r\") pod \"apiserver-7bbb656c7d-pwbtq\" (UID: \"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.382617 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qfcjm" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.392308 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.407121 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4m6dk\" (UniqueName: \"kubernetes.io/projected/2143d413-3556-4232-a3d6-145f94d98606-kube-api-access-4m6dk\") pod \"console-operator-58897d9998-lr8cn\" (UID: \"2143d413-3556-4232-a3d6-145f94d98606\") " pod="openshift-console-operator/console-operator-58897d9998-lr8cn" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.420050 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snn7d\" (UniqueName: \"kubernetes.io/projected/45dd6f4b-7432-459c-ba00-b849e384eaae-kube-api-access-snn7d\") pod \"cluster-image-registry-operator-dc59b4c8b-mwwhj\" (UID: \"45dd6f4b-7432-459c-ba00-b849e384eaae\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mwwhj" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.423548 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrp77\" (UniqueName: \"kubernetes.io/projected/79a36fbf-eb6c-442f-b6f0-d4a5f7435dde-kube-api-access-qrp77\") pod \"downloads-7954f5f757-54gh9\" (UID: \"79a36fbf-eb6c-442f-b6f0-d4a5f7435dde\") " pod="openshift-console/downloads-7954f5f757-54gh9" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.430538 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-6rxh9" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.433750 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" event={"ID":"91fd9e56-9836-4427-b58e-9c0742895c7a","Type":"ContainerStarted","Data":"922b722f2166d4b37cc80ebb053ccbb65ee0e576dbf0d722e2778208109b6d1c"} Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.439360 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-lr8cn" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.441761 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hd752\" (UniqueName: \"kubernetes.io/projected/edb4a604-6b03-48d4-b5ab-09e266b5eef8-kube-api-access-hd752\") pod \"controller-manager-879f6c89f-7pxnw\" (UID: \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.457385 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: E0128 12:22:54.458213 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-28 12:22:54.958155622 +0000 UTC m=+126.045569467 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.462635 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tq58r\" (UniqueName: \"kubernetes.io/projected/a7ad2819-235d-4f6d-b731-6f8e31db0b13-kube-api-access-tq58r\") pod \"machine-api-operator-5694c8668f-xcd5g\" (UID: \"a7ad2819-235d-4f6d-b731-6f8e31db0b13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xcd5g" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.479071 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55j98\" (UniqueName: \"kubernetes.io/projected/ea3bd48a-bb7b-4642-a8e5-5ad2e71d31f9-kube-api-access-55j98\") pod \"openshift-apiserver-operator-796bbdcf4f-xh8vq\" (UID: \"ea3bd48a-bb7b-4642-a8e5-5ad2e71d31f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xh8vq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.484745 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.507067 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7h8g\" (UniqueName: \"kubernetes.io/projected/df73faab-d681-4ffe-b550-331fa336bc10-kube-api-access-l7h8g\") pod \"authentication-operator-69f744f599-dd6rs\" (UID: \"df73faab-d681-4ffe-b550-331fa336bc10\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dd6rs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.518520 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlkkd\" (UniqueName: \"kubernetes.io/projected/08663495-9331-4b6f-b82a-67b308a9afa3-kube-api-access-dlkkd\") pod \"console-f9d7485db-z2lzl\" (UID: \"08663495-9331-4b6f-b82a-67b308a9afa3\") " pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.550126 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8p76\" (UniqueName: \"kubernetes.io/projected/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-kube-api-access-v8p76\") pod \"route-controller-manager-6576b87f9c-2966r\" (UID: \"6d7a1a22-14a6-419e-b4f1-ebf636f248f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.559708 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:54 crc kubenswrapper[4685]: E0128 12:22:54.559853 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:55.059828816 +0000 UTC m=+126.147242641 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.559927 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: E0128 12:22:54.560290 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:55.060281198 +0000 UTC m=+126.147695033 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.563751 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxsgk\" (UniqueName: \"kubernetes.io/projected/46b82026-d586-40b2-ad5b-fc08674d7067-kube-api-access-wxsgk\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.583692 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/45dd6f4b-7432-459c-ba00-b849e384eaae-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-mwwhj\" (UID: \"45dd6f4b-7432-459c-ba00-b849e384eaae\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mwwhj" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.588415 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-54gh9" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.600403 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-xcd5g" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.601163 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kcrl6\" (UniqueName: \"kubernetes.io/projected/75a32638-e479-4025-abd5-d00533347443-kube-api-access-kcrl6\") pod \"openshift-config-operator-7777fb866f-dwzsq\" (UID: \"75a32638-e479-4025-abd5-d00533347443\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.616370 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xh8vq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.627062 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.646936 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkg7l\" (UniqueName: \"kubernetes.io/projected/68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45-kube-api-access-xkg7l\") pod \"marketplace-operator-79b997595-qpc29\" (UID: \"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45\") " pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.647225 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.659125 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xq58s\" (UniqueName: \"kubernetes.io/projected/ee856066-85d4-41a5-86eb-9ab5e01ca0c6-kube-api-access-xq58s\") pod \"dns-default-slprg\" (UID: \"ee856066-85d4-41a5-86eb-9ab5e01ca0c6\") " pod="openshift-dns/dns-default-slprg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.660101 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-z2lzl" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.660640 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:54 crc kubenswrapper[4685]: E0128 12:22:54.661085 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:55.161065928 +0000 UTC m=+126.248479763 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.669513 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.676116 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mwwhj" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.684559 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-slprg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.701235 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sr2k7\" (UniqueName: \"kubernetes.io/projected/54c1da50-0209-4285-bc15-91427e15241d-kube-api-access-sr2k7\") pod \"csi-hostpathplugin-xvxqg\" (UID: \"54c1da50-0209-4285-bc15-91427e15241d\") " pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.721702 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/82c02f94-72c8-43fd-b0f0-7c644d696b61-bound-sa-token\") pod \"ingress-operator-5b745b69d9-kvphq\" (UID: \"82c02f94-72c8-43fd-b0f0-7c644d696b61\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.744689 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srmxx\" (UniqueName: \"kubernetes.io/projected/cc7e3a89-0688-4bda-a76c-1b03e21b8419-kube-api-access-srmxx\") pod \"kube-storage-version-migrator-operator-b67b599dd-hbxst\" (UID: \"cc7e3a89-0688-4bda-a76c-1b03e21b8419\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hbxst" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.751251 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gm6w\" (UniqueName: \"kubernetes.io/projected/02b54db7-da24-4fc2-88e2-cac233ca6323-kube-api-access-7gm6w\") pod \"service-ca-9c57cc56f-7bmls\" (UID: \"02b54db7-da24-4fc2-88e2-cac233ca6323\") " pod="openshift-service-ca/service-ca-9c57cc56f-7bmls" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.759151 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x87lh\" (UniqueName: \"kubernetes.io/projected/0aa6b49a-8078-44f4-b1a9-2542d5bad461-kube-api-access-x87lh\") pod \"control-plane-machine-set-operator-78cbb6b69f-jbtd9\" (UID: \"0aa6b49a-8078-44f4-b1a9-2542d5bad461\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jbtd9" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.763560 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: E0128 12:22:54.763890 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:55.263876252 +0000 UTC m=+126.351290087 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.764489 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hbxst" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.777634 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4kwh\" (UniqueName: \"kubernetes.io/projected/4cd9e183-ef71-4bc0-af54-df333c728cc4-kube-api-access-n4kwh\") pod \"router-default-5444994796-xvw2d\" (UID: \"4cd9e183-ef71-4bc0-af54-df333c728cc4\") " pod="openshift-ingress/router-default-5444994796-xvw2d" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.783401 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-xvw2d" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.799945 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvznn\" (UniqueName: \"kubernetes.io/projected/8eb33408-9530-4b30-a357-9ecd1a094606-kube-api-access-dvznn\") pod \"olm-operator-6b444d44fb-c5jnv\" (UID: \"8eb33408-9530-4b30-a357-9ecd1a094606\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.800135 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-dd6rs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.810814 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jbtd9" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.816433 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.838066 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hl5x2\" (UniqueName: \"kubernetes.io/projected/25efaeca-af03-4a53-9955-993273376049-kube-api-access-hl5x2\") pod \"ingress-canary-gjp8v\" (UID: \"25efaeca-af03-4a53-9955-993273376049\") " pod="openshift-ingress-canary/ingress-canary-gjp8v" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.865679 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:54 crc kubenswrapper[4685]: E0128 12:22:54.866093 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-28 12:22:55.366029679 +0000 UTC m=+126.453443514 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.866212 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.866509 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnfmt\" (UniqueName: \"kubernetes.io/projected/b65cc4af-8099-4f34-a93e-9ef9ae97996b-kube-api-access-hnfmt\") pod \"migrator-59844c95c7-s4474\" (UID: \"b65cc4af-8099-4f34-a93e-9ef9ae97996b\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-s4474" Jan 28 12:22:54 crc kubenswrapper[4685]: E0128 12:22:54.866675 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:55.366668536 +0000 UTC m=+126.454082361 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.877097 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.882639 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/421ae539-a9a0-4a76-be82-23ea497077ad-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-6z8gs\" (UID: \"421ae539-a9a0-4a76-be82-23ea497077ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6z8gs" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.883833 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-7bmls" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.905976 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gl5k8\" (UniqueName: \"kubernetes.io/projected/80b3c037-4d1a-4be7-81d6-1c33f1801bd6-kube-api-access-gl5k8\") pod \"machine-config-controller-84d6567774-kfqrq\" (UID: \"80b3c037-4d1a-4be7-81d6-1c33f1801bd6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-kfqrq" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.919676 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6d48\" (UniqueName: \"kubernetes.io/projected/eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20-kube-api-access-j6d48\") pod \"packageserver-d55dfcdfc-h6gqk\" (UID: \"eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.940128 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbjdb\" (UniqueName: \"kubernetes.io/projected/384b638e-e264-4492-8250-95540a24916c-kube-api-access-bbjdb\") pod \"collect-profiles-29493375-d96dp\" (UID: \"384b638e-e264-4492-8250-95540a24916c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.960637 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrdkn\" (UniqueName: \"kubernetes.io/projected/4aea2580-1e20-4df6-b328-c1b99d986330-kube-api-access-qrdkn\") pod \"service-ca-operator-777779d784-pvrr6\" (UID: \"4aea2580-1e20-4df6-b328-c1b99d986330\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-pvrr6" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.967611 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:54 crc kubenswrapper[4685]: E0128 12:22:54.967781 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:55.467747303 +0000 UTC m=+126.555161138 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.967958 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:54 crc kubenswrapper[4685]: E0128 12:22:54.968413 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:55.468400241 +0000 UTC m=+126.555814076 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.972745 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.977608 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8zcgc\" (UniqueName: \"kubernetes.io/projected/b3a33bb2-4fb3-4988-8b43-62a9bdd2e659-kube-api-access-8zcgc\") pod \"machine-config-server-ftmbf\" (UID: \"b3a33bb2-4fb3-4988-8b43-62a9bdd2e659\") " pod="openshift-machine-config-operator/machine-config-server-ftmbf" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.978882 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9xtj\" (UniqueName: \"kubernetes.io/projected/3fdafa55-db38-4041-bb34-f319905ad733-kube-api-access-j9xtj\") pod \"catalog-operator-68c6474976-6wksk\" (UID: \"3fdafa55-db38-4041-bb34-f319905ad733\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.997295 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftnl2\" (UniqueName: \"kubernetes.io/projected/f09051b7-ad38-4a84-89d7-ad84fd51fa1c-kube-api-access-ftnl2\") pod \"package-server-manager-789f6589d5-922md\" (UID: \"f09051b7-ad38-4a84-89d7-ad84fd51fa1c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-922md" Jan 28 12:22:54 crc kubenswrapper[4685]: I0128 12:22:54.998935 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-gjp8v" Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.017074 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/39f4fa09-765f-45c8-8206-290bf19fab29-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-vcrmr\" (UID: \"39f4fa09-765f-45c8-8206-290bf19fab29\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vcrmr" Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.052816 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrdhj\" (UniqueName: \"kubernetes.io/projected/021e1574-af09-49bb-af67-1fc874bb0a06-kube-api-access-vrdhj\") pod \"multus-admission-controller-857f4d67dd-lw2hz\" (UID: \"021e1574-af09-49bb-af67-1fc874bb0a06\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lw2hz" Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.058303 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-68qg7"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.059553 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4x7zt"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.064015 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-psh57"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.070815 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-6rxh9"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.072087 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-zp7xc"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.072499 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:55 crc kubenswrapper[4685]: E0128 12:22:55.072923 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:55.572902101 +0000 UTC m=+126.660315936 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.076562 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6z8gs" Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.077627 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rccz9\" (UniqueName: \"kubernetes.io/projected/af98b7ab-bf20-4cb2-85ec-1eb757b8db45-kube-api-access-rccz9\") pod \"machine-config-operator-74547568cd-xkxzp\" (UID: \"af98b7ab-bf20-4cb2-85ec-1eb757b8db45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp" Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.091345 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp" Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.093907 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7v4x8\" (UniqueName: \"kubernetes.io/projected/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-kube-api-access-7v4x8\") pod \"oauth-openshift-558db77b4-4gdsr\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.097575 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-kfqrq" Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.103785 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pckm\" (UniqueName: \"kubernetes.io/projected/82c02f94-72c8-43fd-b0f0-7c644d696b61-kube-api-access-7pckm\") pod \"ingress-operator-5b745b69d9-kvphq\" (UID: \"82c02f94-72c8-43fd-b0f0-7c644d696b61\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq" Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.127054 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-lw2hz" Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.132305 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-922md" Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.136483 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qfcjm"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.138411 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp" Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.146500 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk" Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.154015 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7pxnw"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.154077 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-lr8cn"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.155372 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-s4474" Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.159063 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-xcd5g"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.159498 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-pvrr6" Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.174212 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:55 crc kubenswrapper[4685]: E0128 12:22:55.174532 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:55.674518513 +0000 UTC m=+126.761932348 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.187327 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mwwhj"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.193450 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-54gh9"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.199636 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk" Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.203365 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.205762 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.217280 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vcrmr" Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.226560 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.248360 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-ftmbf" Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.271892 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xh8vq"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.275564 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:55 crc kubenswrapper[4685]: E0128 12:22:55.277441 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:55.777417959 +0000 UTC m=+126.864831794 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.318627 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.347620 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-z2lzl"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.351752 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.351812 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-slprg"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.352013 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-7bmls"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.370802 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq" Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.379005 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:55 crc kubenswrapper[4685]: E0128 12:22:55.379500 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:55.879479824 +0000 UTC m=+126.966893659 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.380639 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-gjp8v"] Jan 28 12:22:55 crc kubenswrapper[4685]: W0128 12:22:55.381244 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91aac1a9_a4dd_4668_af62_e76501c860ac.slice/crio-d99e0007889e61ae1a9fd84fc1df16890b6bbc389b8dfcf50e64362affab29d9 WatchSource:0}: Error finding container d99e0007889e61ae1a9fd84fc1df16890b6bbc389b8dfcf50e64362affab29d9: Status 404 returned error can't find the container with id d99e0007889e61ae1a9fd84fc1df16890b6bbc389b8dfcf50e64362affab29d9 Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.421019 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qpc29"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.422981 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jbtd9"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.425141 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-dd6rs"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.434806 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hbxst"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.440728 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mwwhj" event={"ID":"45dd6f4b-7432-459c-ba00-b849e384eaae","Type":"ContainerStarted","Data":"cfa603145ba58b250a3c8b33f6ce77a4714f0a57214cea98cc8fc0bdaa4f7173"} Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.442654 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" event={"ID":"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9","Type":"ContainerStarted","Data":"3980d41d3e19c5d7ba941c43e18414019c351d322ac26e6e257c5c79e50a2ba8"} Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.443393 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" event={"ID":"6d7a1a22-14a6-419e-b4f1-ebf636f248f9","Type":"ContainerStarted","Data":"21fb185e8cadb0d17dc3976f09f12739b022a9d7cd4ec9c6c82073e7b80c3800"} Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.444076 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-6rxh9" event={"ID":"31a31a5b-65f9-4997-b083-16382c0f6c11","Type":"ContainerStarted","Data":"f0a21bf324c9027131ebd234164f9498cf7e29849c0fe11be44f158eac13f19d"} Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.444806 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" 
event={"ID":"75a32638-e479-4025-abd5-d00533347443","Type":"ContainerStarted","Data":"3dba3346a078d09a202db6a911423fd24d49c42e8dbc0249d95fffa5ab9666ff"} Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.446724 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-z2lzl" event={"ID":"08663495-9331-4b6f-b82a-67b308a9afa3","Type":"ContainerStarted","Data":"b185a380712082807210e55bac4be6060c72f4cab537194bf23299c341b80286"} Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.447731 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" event={"ID":"91fd9e56-9836-4427-b58e-9c0742895c7a","Type":"ContainerStarted","Data":"c7051ef9a36f2757ddf397ff262a97acef8a1ba0e956fbc122e27df4087794bc"} Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.448396 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" event={"ID":"9df99517-c3a0-4fde-a93f-24b84a33615a","Type":"ContainerStarted","Data":"bc6bea370d0fe46f21713bc2107a3d63a746ef47662183755bd4b3bca65c9127"} Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.450592 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" event={"ID":"edb4a604-6b03-48d4-b5ab-09e266b5eef8","Type":"ContainerStarted","Data":"282c7f186f9f420ee68b24b3cc1e4e502f363b0f9bd49adaec907300e8d49178"} Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.451300 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" event={"ID":"b22a7729-d10a-412c-8df1-30992ba607b0","Type":"ContainerStarted","Data":"6d82f0d74128ed04a7e49d445c248293b1fdfdc697b6dd99697386dc0f9a485d"} Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.452710 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4x7zt" event={"ID":"d97a0072-f1ae-4674-8ad6-aab9c60155f3","Type":"ContainerStarted","Data":"e971bf85099e48b294830d66adcb01183ceb25b09389157d03c74e70a8fed32c"} Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.453396 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-xcd5g" event={"ID":"a7ad2819-235d-4f6d-b731-6f8e31db0b13","Type":"ContainerStarted","Data":"7b17017e60d8a0b77a7e4e802c4bdaa9e64d8ffc2c6eb5e3e645ad2f33f6a039"} Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.453940 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-lr8cn" event={"ID":"2143d413-3556-4232-a3d6-145f94d98606","Type":"ContainerStarted","Data":"c6dcda98c0e7b1e27f8dc3b76788efea83e9938997caa1a18f37b879850a7194"} Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.454587 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xh8vq" event={"ID":"ea3bd48a-bb7b-4642-a8e5-5ad2e71d31f9","Type":"ContainerStarted","Data":"41cecc36c1e5d282244a0ecabede36e6e72d479c34be8f096617e90874d21a78"} Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.455319 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-xvw2d" event={"ID":"4cd9e183-ef71-4bc0-af54-df333c728cc4","Type":"ContainerStarted","Data":"fb1b208fcff3484bd593415d8bf245f3ec20da26d8fd740a1c413f6e2fe44750"} Jan 28 12:22:55 crc 
kubenswrapper[4685]: I0128 12:22:55.455925 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-54gh9" event={"ID":"79a36fbf-eb6c-442f-b6f0-d4a5f7435dde","Type":"ContainerStarted","Data":"baa7299f77589c7f5c7faeb03fcf1aab667328f63a4121ce25e26984f565ac3d"} Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.480957 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:55 crc kubenswrapper[4685]: E0128 12:22:55.481117 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:55.981076645 +0000 UTC m=+127.068490480 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.481437 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:55 crc kubenswrapper[4685]: E0128 12:22:55.481891 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:55.981882607 +0000 UTC m=+127.069296442 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.535758 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6z8gs"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.582495 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:55 crc kubenswrapper[4685]: E0128 12:22:55.582971 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:56.082950604 +0000 UTC m=+127.170364439 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.599929 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-xvxqg"] Jan 28 12:22:55 crc kubenswrapper[4685]: W0128 12:22:55.625515 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8eb33408_9530_4b30_a357_9ecd1a094606.slice/crio-4ee8d82b66dce997f4355bb6ec8ae60c24c305e8e8013df4eda2c1831e194a7c WatchSource:0}: Error finding container 4ee8d82b66dce997f4355bb6ec8ae60c24c305e8e8013df4eda2c1831e194a7c: Status 404 returned error can't find the container with id 4ee8d82b66dce997f4355bb6ec8ae60c24c305e8e8013df4eda2c1831e194a7c Jan 28 12:22:55 crc kubenswrapper[4685]: W0128 12:22:55.660023 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podee856066_85d4_41a5_86eb_9ab5e01ca0c6.slice/crio-2b209789ae2953eaec3ce8a778f23754ad7dc97645164a4666bba7c99e013550 WatchSource:0}: Error finding container 2b209789ae2953eaec3ce8a778f23754ad7dc97645164a4666bba7c99e013550: Status 404 returned error can't find the container with id 2b209789ae2953eaec3ce8a778f23754ad7dc97645164a4666bba7c99e013550 Jan 28 12:22:55 crc kubenswrapper[4685]: W0128 12:22:55.673442 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod25efaeca_af03_4a53_9955_993273376049.slice/crio-116621bac6087857dc39c04e5d313801092af9c54f0610d5b5da44419af228eb WatchSource:0}: Error finding container 
116621bac6087857dc39c04e5d313801092af9c54f0610d5b5da44419af228eb: Status 404 returned error can't find the container with id 116621bac6087857dc39c04e5d313801092af9c54f0610d5b5da44419af228eb Jan 28 12:22:55 crc kubenswrapper[4685]: W0128 12:22:55.674906 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0aa6b49a_8078_44f4_b1a9_2542d5bad461.slice/crio-5e733df9ecfbc55f611006578f56ff1dbcf578dbdb4c16c297e30e785d2946cb WatchSource:0}: Error finding container 5e733df9ecfbc55f611006578f56ff1dbcf578dbdb4c16c297e30e785d2946cb: Status 404 returned error can't find the container with id 5e733df9ecfbc55f611006578f56ff1dbcf578dbdb4c16c297e30e785d2946cb Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.685541 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:55 crc kubenswrapper[4685]: E0128 12:22:55.686324 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:56.186305163 +0000 UTC m=+127.273718988 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:55 crc kubenswrapper[4685]: W0128 12:22:55.687595 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddf73faab_d681_4ffe_b550_331fa336bc10.slice/crio-706ab6c42c9bacc21918c69664429209b5dea6ddbc4894875a3491f4542b05ca WatchSource:0}: Error finding container 706ab6c42c9bacc21918c69664429209b5dea6ddbc4894875a3491f4542b05ca: Status 404 returned error can't find the container with id 706ab6c42c9bacc21918c69664429209b5dea6ddbc4894875a3491f4542b05ca Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.786921 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:55 crc kubenswrapper[4685]: E0128 12:22:55.787459 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:56.287437882 +0000 UTC m=+127.374851717 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.889815 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:55 crc kubenswrapper[4685]: E0128 12:22:55.890514 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:56.390492773 +0000 UTC m=+127.477906608 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.910901 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vcrmr"] Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.991707 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:55 crc kubenswrapper[4685]: E0128 12:22:55.991887 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:56.491864759 +0000 UTC m=+127.579278594 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:55 crc kubenswrapper[4685]: I0128 12:22:55.991919 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:55 crc kubenswrapper[4685]: E0128 12:22:55.992424 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:56.492407983 +0000 UTC m=+127.579821828 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.093429 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:56 crc kubenswrapper[4685]: E0128 12:22:56.094374 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:56.594355524 +0000 UTC m=+127.681769359 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:56 crc kubenswrapper[4685]: W0128 12:22:56.153723 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod39f4fa09_765f_45c8_8206_290bf19fab29.slice/crio-9d7062dccb6ccccab9bb23c7a211271ac7b00c3835070cde0513eca1733672ee WatchSource:0}: Error finding container 9d7062dccb6ccccab9bb23c7a211271ac7b00c3835070cde0513eca1733672ee: Status 404 returned error can't find the container with id 9d7062dccb6ccccab9bb23c7a211271ac7b00c3835070cde0513eca1733672ee Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.196087 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:56 crc kubenswrapper[4685]: E0128 12:22:56.196624 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:56.696599953 +0000 UTC m=+127.784013788 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:56 crc kubenswrapper[4685]: W0128 12:22:56.230777 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3a33bb2_4fb3_4988_8b43_62a9bdd2e659.slice/crio-cde9d7efdf20b5acd754914bfb1715e9178950b5c36e5f40f2f114581d1b4717 WatchSource:0}: Error finding container cde9d7efdf20b5acd754914bfb1715e9178950b5c36e5f40f2f114581d1b4717: Status 404 returned error can't find the container with id cde9d7efdf20b5acd754914bfb1715e9178950b5c36e5f40f2f114581d1b4717 Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.297224 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:56 crc kubenswrapper[4685]: E0128 12:22:56.297556 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-28 12:22:56.797522467 +0000 UTC m=+127.884936302 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.297681 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:56 crc kubenswrapper[4685]: E0128 12:22:56.298139 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:56.798121463 +0000 UTC m=+127.885535298 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.369089 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4gdsr"] Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.399568 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:56 crc kubenswrapper[4685]: E0128 12:22:56.399906 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:56.899887078 +0000 UTC m=+127.987300913 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.466806 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" event={"ID":"54c1da50-0209-4285-bc15-91427e15241d","Type":"ContainerStarted","Data":"7cd7e66489d83bdcff927bfce200b18cef2acb8047f46338e3020a4f88b4f3a1"} Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.469184 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vcrmr" event={"ID":"39f4fa09-765f-45c8-8206-290bf19fab29","Type":"ContainerStarted","Data":"9d7062dccb6ccccab9bb23c7a211271ac7b00c3835070cde0513eca1733672ee"} Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.470878 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-7bmls" event={"ID":"02b54db7-da24-4fc2-88e2-cac233ca6323","Type":"ContainerStarted","Data":"d648100cc3aff3ba24814ce1f76926e83f498cb4c95547f0c3d1eef5ad5fa047"} Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.471733 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qfcjm" event={"ID":"0353bfa2-54e9-4f75-ab25-ed65d14a9ab7","Type":"ContainerStarted","Data":"c44947ee18525b85de89475acda4ceb85ca735e0bafb52fba639d069c9b217c0"} Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.473204 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6z8gs" event={"ID":"421ae539-a9a0-4a76-be82-23ea497077ad","Type":"ContainerStarted","Data":"ce9131496ccbc8994c5e410cd8b0b08b62ac1210f53c1557fb0996eb5d93c0c3"} Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.474156 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" event={"ID":"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45","Type":"ContainerStarted","Data":"8764eec28c9522ea30e1bd7d699ac9753a20358c5ea924784f39ee0e7761e885"} Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.477544 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hbxst" event={"ID":"cc7e3a89-0688-4bda-a76c-1b03e21b8419","Type":"ContainerStarted","Data":"a1eb0be47c32257b640d9b3d2cec5dfa1293d1734cb3a61fb951e8e145e3da4e"} Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.479590 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv" event={"ID":"8eb33408-9530-4b30-a357-9ecd1a094606","Type":"ContainerStarted","Data":"4ee8d82b66dce997f4355bb6ec8ae60c24c305e8e8013df4eda2c1831e194a7c"} Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.481157 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-slprg" event={"ID":"ee856066-85d4-41a5-86eb-9ab5e01ca0c6","Type":"ContainerStarted","Data":"2b209789ae2953eaec3ce8a778f23754ad7dc97645164a4666bba7c99e013550"} Jan 28 12:22:56 crc 
kubenswrapper[4685]: I0128 12:22:56.482964 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jbtd9" event={"ID":"0aa6b49a-8078-44f4-b1a9-2542d5bad461","Type":"ContainerStarted","Data":"5e733df9ecfbc55f611006578f56ff1dbcf578dbdb4c16c297e30e785d2946cb"} Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.484548 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-dd6rs" event={"ID":"df73faab-d681-4ffe-b550-331fa336bc10","Type":"ContainerStarted","Data":"706ab6c42c9bacc21918c69664429209b5dea6ddbc4894875a3491f4542b05ca"} Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.485806 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-psh57" event={"ID":"91aac1a9-a4dd-4668-af62-e76501c860ac","Type":"ContainerStarted","Data":"d99e0007889e61ae1a9fd84fc1df16890b6bbc389b8dfcf50e64362affab29d9"} Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.486478 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-ftmbf" event={"ID":"b3a33bb2-4fb3-4988-8b43-62a9bdd2e659","Type":"ContainerStarted","Data":"cde9d7efdf20b5acd754914bfb1715e9178950b5c36e5f40f2f114581d1b4717"} Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.487556 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-gjp8v" event={"ID":"25efaeca-af03-4a53-9955-993273376049","Type":"ContainerStarted","Data":"116621bac6087857dc39c04e5d313801092af9c54f0610d5b5da44419af228eb"} Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.501218 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:56 crc kubenswrapper[4685]: E0128 12:22:56.501673 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:57.001649454 +0000 UTC m=+128.089063289 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:56 crc kubenswrapper[4685]: W0128 12:22:56.529048 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4b002972_6a89_4a6a_9839_a69e5e9ff3e5.slice/crio-8fc38eb5ca5dea78125ca1e8c5d30d5b4cba1772038fa9a467188c194f6250f2 WatchSource:0}: Error finding container 8fc38eb5ca5dea78125ca1e8c5d30d5b4cba1772038fa9a467188c194f6250f2: Status 404 returned error can't find the container with id 8fc38eb5ca5dea78125ca1e8c5d30d5b4cba1772038fa9a467188c194f6250f2 Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.606689 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:56 crc kubenswrapper[4685]: E0128 12:22:56.607029 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:57.107003826 +0000 UTC m=+128.194417661 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.607326 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:56 crc kubenswrapper[4685]: E0128 12:22:56.607659 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:57.107651304 +0000 UTC m=+128.195065139 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.637589 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-s4474"] Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.651449 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk"] Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.708272 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:56 crc kubenswrapper[4685]: E0128 12:22:56.713093 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:57.213061108 +0000 UTC m=+128.300474943 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.731495 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp"] Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.741313 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-kfqrq"] Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.745887 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp"] Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.747673 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-922md"] Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.809513 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk"] Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.811304 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-lw2hz"] Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.813106 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-pvrr6"] Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.814878 4685 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:56 crc kubenswrapper[4685]: E0128 12:22:56.814937 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:57.314919777 +0000 UTC m=+128.402333612 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.849116 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq"] Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.915870 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:56 crc kubenswrapper[4685]: E0128 12:22:56.916120 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:57.416083216 +0000 UTC m=+128.503497092 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:56 crc kubenswrapper[4685]: I0128 12:22:56.916300 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:56 crc kubenswrapper[4685]: E0128 12:22:56.916778 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:57.416759855 +0000 UTC m=+128.504173690 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:57 crc kubenswrapper[4685]: I0128 12:22:57.017901 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:57 crc kubenswrapper[4685]: E0128 12:22:57.018113 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:57.518073079 +0000 UTC m=+128.605486934 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:57 crc kubenswrapper[4685]: I0128 12:22:57.018635 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:57 crc kubenswrapper[4685]: E0128 12:22:57.019250 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:57.51923399 +0000 UTC m=+128.606647825 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:57 crc kubenswrapper[4685]: W0128 12:22:57.028296 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb65cc4af_8099_4f34_a93e_9ef9ae97996b.slice/crio-c713375213cb5ce945f555bffbc0be5614705226564846d253fc33d93ca79dc8 WatchSource:0}: Error finding container c713375213cb5ce945f555bffbc0be5614705226564846d253fc33d93ca79dc8: Status 404 returned error can't find the container with id c713375213cb5ce945f555bffbc0be5614705226564846d253fc33d93ca79dc8 Jan 28 12:22:57 crc kubenswrapper[4685]: I0128 12:22:57.094620 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:22:57 crc kubenswrapper[4685]: I0128 12:22:57.150488 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:57 crc kubenswrapper[4685]: E0128 12:22:57.151297 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:57.651275268 +0000 UTC m=+128.738689103 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:57 crc kubenswrapper[4685]: I0128 12:22:57.252653 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:57 crc kubenswrapper[4685]: E0128 12:22:57.253458 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:57.753441677 +0000 UTC m=+128.840855512 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:57 crc kubenswrapper[4685]: I0128 12:22:57.354305 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:57 crc kubenswrapper[4685]: E0128 12:22:57.354502 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:57.854461403 +0000 UTC m=+128.941875278 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:57 crc kubenswrapper[4685]: I0128 12:22:57.354578 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:57 crc kubenswrapper[4685]: E0128 12:22:57.355038 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:57.855024038 +0000 UTC m=+128.942437873 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:57 crc kubenswrapper[4685]: I0128 12:22:57.456268 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:57 crc kubenswrapper[4685]: E0128 12:22:57.456550 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:57.956510767 +0000 UTC m=+129.043924642 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:57 crc kubenswrapper[4685]: I0128 12:22:57.496362 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" event={"ID":"4b002972-6a89-4a6a-9839-a69e5e9ff3e5","Type":"ContainerStarted","Data":"8fc38eb5ca5dea78125ca1e8c5d30d5b4cba1772038fa9a467188c194f6250f2"} Jan 28 12:22:57 crc kubenswrapper[4685]: I0128 12:22:57.498843 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-xvw2d" event={"ID":"4cd9e183-ef71-4bc0-af54-df333c728cc4","Type":"ContainerStarted","Data":"c151842df871f79393089464a2d989d4f971b029f367ba63afd066f180d99ff8"} Jan 28 12:22:57 crc kubenswrapper[4685]: I0128 12:22:57.500406 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-s4474" event={"ID":"b65cc4af-8099-4f34-a93e-9ef9ae97996b","Type":"ContainerStarted","Data":"c713375213cb5ce945f555bffbc0be5614705226564846d253fc33d93ca79dc8"} Jan 28 12:22:57 crc kubenswrapper[4685]: I0128 12:22:57.559332 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:57 crc kubenswrapper[4685]: E0128 12:22:57.559874 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:58.059845545 +0000 UTC m=+129.147259410 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:57 crc kubenswrapper[4685]: W0128 12:22:57.634469 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeccf5b8d_9fb2_4d8d_81e3_08265c0bdb20.slice/crio-7affb3a5081d09b6ed2cbcd43d17909197735e88a6a9ebe00578d1d827250b29 WatchSource:0}: Error finding container 7affb3a5081d09b6ed2cbcd43d17909197735e88a6a9ebe00578d1d827250b29: Status 404 returned error can't find the container with id 7affb3a5081d09b6ed2cbcd43d17909197735e88a6a9ebe00578d1d827250b29 Jan 28 12:22:57 crc kubenswrapper[4685]: I0128 12:22:57.660341 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:57 crc kubenswrapper[4685]: E0128 12:22:57.660858 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:58.16081379 +0000 UTC m=+129.248227665 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:57 crc kubenswrapper[4685]: I0128 12:22:57.661091 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:57 crc kubenswrapper[4685]: E0128 12:22:57.661474 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:58.161462427 +0000 UTC m=+129.248876272 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:57 crc kubenswrapper[4685]: I0128 12:22:57.762862 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:57 crc kubenswrapper[4685]: E0128 12:22:57.763086 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:58.263044109 +0000 UTC m=+129.350457984 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:57 crc kubenswrapper[4685]: I0128 12:22:57.763564 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:57 crc kubenswrapper[4685]: E0128 12:22:57.764136 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:58.264112597 +0000 UTC m=+129.351526492 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:57 crc kubenswrapper[4685]: W0128 12:22:57.768126 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3fdafa55_db38_4041_bb34_f319905ad733.slice/crio-f7dd86c29bba855738578f5ff03d9e7be18a00c0e3594028380f64871500f1b6 WatchSource:0}: Error finding container f7dd86c29bba855738578f5ff03d9e7be18a00c0e3594028380f64871500f1b6: Status 404 returned error can't find the container with id f7dd86c29bba855738578f5ff03d9e7be18a00c0e3594028380f64871500f1b6 Jan 28 12:22:57 crc kubenswrapper[4685]: W0128 12:22:57.771789 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4aea2580_1e20_4df6_b328_c1b99d986330.slice/crio-9aea54539541274ee92b1b2e7775772686d333e0ccc1f1ebcee748ba44d6c858 WatchSource:0}: Error finding container 9aea54539541274ee92b1b2e7775772686d333e0ccc1f1ebcee748ba44d6c858: Status 404 returned error can't find the container with id 9aea54539541274ee92b1b2e7775772686d333e0ccc1f1ebcee748ba44d6c858 Jan 28 12:22:57 crc kubenswrapper[4685]: W0128 12:22:57.780217 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod82c02f94_72c8_43fd_b0f0_7c644d696b61.slice/crio-98005e71c3f82835c669a0b5d4ccc078896ba7d5d0cf47d689563ad96f676d05 WatchSource:0}: Error finding container 98005e71c3f82835c669a0b5d4ccc078896ba7d5d0cf47d689563ad96f676d05: Status 404 returned error can't find the container with id 98005e71c3f82835c669a0b5d4ccc078896ba7d5d0cf47d689563ad96f676d05 Jan 28 12:22:57 crc kubenswrapper[4685]: I0128 12:22:57.864736 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:57 crc kubenswrapper[4685]: E0128 12:22:57.864905 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:58.364872446 +0000 UTC m=+129.452286291 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:57 crc kubenswrapper[4685]: I0128 12:22:57.865099 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:57 crc kubenswrapper[4685]: E0128 12:22:57.865483 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:58.365472332 +0000 UTC m=+129.452886167 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:57 crc kubenswrapper[4685]: I0128 12:22:57.966123 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:57 crc kubenswrapper[4685]: E0128 12:22:57.966660 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:58.466642333 +0000 UTC m=+129.554056168 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:58 crc kubenswrapper[4685]: I0128 12:22:58.067232 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:58 crc kubenswrapper[4685]: E0128 12:22:58.067716 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:58.56769771 +0000 UTC m=+129.655111545 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:58 crc kubenswrapper[4685]: I0128 12:22:58.168417 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:58 crc kubenswrapper[4685]: E0128 12:22:58.168585 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:58.668562586 +0000 UTC m=+129.755976421 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:58 crc kubenswrapper[4685]: I0128 12:22:58.168805 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:22:58 crc kubenswrapper[4685]: E0128 12:22:58.169607 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:58.669595248 +0000 UTC m=+129.757009083 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:22:58 crc kubenswrapper[4685]: I0128 12:22:58.271148 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:22:58 crc kubenswrapper[4685]: E0128 12:22:58.271970 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:58.771934335 +0000 UTC m=+129.859348200 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:22:58 crc kubenswrapper[4685]: I0128 12:22:58.372648 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:22:58 crc kubenswrapper[4685]: E0128 12:22:58.373151 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:58.873127387 +0000 UTC m=+129.960541222 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:22:58 crc kubenswrapper[4685]: I0128 12:22:58.473443 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:22:58 crc kubenswrapper[4685]: E0128 12:22:58.473592 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:58.973571595 +0000 UTC m=+130.060985430 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:22:58 crc kubenswrapper[4685]: I0128 12:22:58.473759 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:22:58 crc kubenswrapper[4685]: E0128 12:22:58.474051 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:58.97404378 +0000 UTC m=+130.061457615 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:22:58 crc kubenswrapper[4685]: I0128 12:22:58.520556 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-lw2hz" event={"ID":"021e1574-af09-49bb-af67-1fc874bb0a06","Type":"ContainerStarted","Data":"42001610a361231a57dad2117f0762b565dcc49c6b2ad7f83afc70bb67ea50fd"}
Jan 28 12:22:58 crc kubenswrapper[4685]: I0128 12:22:58.524779 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-922md" event={"ID":"f09051b7-ad38-4a84-89d7-ad84fd51fa1c","Type":"ContainerStarted","Data":"70e1c04a3d8c1985797137680f290431998c13b4de02f7cb06f9ef9a348423e0"}
Jan 28 12:22:58 crc kubenswrapper[4685]: I0128 12:22:58.528532 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-pvrr6" event={"ID":"4aea2580-1e20-4df6-b328-c1b99d986330","Type":"ContainerStarted","Data":"9aea54539541274ee92b1b2e7775772686d333e0ccc1f1ebcee748ba44d6c858"}
Jan 28 12:22:58 crc kubenswrapper[4685]: I0128 12:22:58.530982 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk" event={"ID":"eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20","Type":"ContainerStarted","Data":"7affb3a5081d09b6ed2cbcd43d17909197735e88a6a9ebe00578d1d827250b29"}
Jan 28 12:22:58 crc kubenswrapper[4685]: I0128 12:22:58.532144 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-kfqrq" event={"ID":"80b3c037-4d1a-4be7-81d6-1c33f1801bd6","Type":"ContainerStarted","Data":"e04b565f2aa29ab4a43f6928376d54ca2110a4e13beef5789d876f99ff9cc345"}
Jan 28 12:22:58 crc kubenswrapper[4685]: I0128 12:22:58.534324 4685 generic.go:334] "Generic (PLEG): container finished" podID="b22a7729-d10a-412c-8df1-30992ba607b0" containerID="b3cacea32b5de818b11c2d41fc33559cfe549850eca9bdf6cccdf5b607d368f5" exitCode=0
Jan 28 12:22:58 crc kubenswrapper[4685]: I0128 12:22:58.534372 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" event={"ID":"b22a7729-d10a-412c-8df1-30992ba607b0","Type":"ContainerDied","Data":"b3cacea32b5de818b11c2d41fc33559cfe549850eca9bdf6cccdf5b607d368f5"}
Jan 28 12:22:58 crc kubenswrapper[4685]: I0128 12:22:58.535064 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk" event={"ID":"3fdafa55-db38-4041-bb34-f319905ad733","Type":"ContainerStarted","Data":"f7dd86c29bba855738578f5ff03d9e7be18a00c0e3594028380f64871500f1b6"}
Jan 28 12:22:58 crc kubenswrapper[4685]: I0128 12:22:58.535899 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq" event={"ID":"82c02f94-72c8-43fd-b0f0-7c644d696b61","Type":"ContainerStarted","Data":"98005e71c3f82835c669a0b5d4ccc078896ba7d5d0cf47d689563ad96f676d05"}
Jan 28 12:22:58 crc kubenswrapper[4685]: I0128 12:22:58.542061 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp" event={"ID":"af98b7ab-bf20-4cb2-85ec-1eb757b8db45","Type":"ContainerStarted","Data":"f786034f9db9ea033bb9539f3b9509baf0bae18caefa7c0721895ea0d246268a"}
Jan 28 12:22:58 crc kubenswrapper[4685]: I0128 12:22:58.543483 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp" event={"ID":"384b638e-e264-4492-8250-95540a24916c","Type":"ContainerStarted","Data":"1e155aa5f84aa085b7069a6a9c290dcfcfe4c536e3f7d78b7ddf6ef67e814bd3"}
Jan 28 12:22:58 crc kubenswrapper[4685]: I0128 12:22:58.574716 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:22:58 crc kubenswrapper[4685]: E0128 12:22:58.575102 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:22:59.075083606 +0000 UTC m=+130.162497441 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:22:59 crc kubenswrapper[4685]: I0128 12:22:59.493046 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:22:59 crc kubenswrapper[4685]: E0128 12:22:59.493729 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:22:59.993698144 +0000 UTC m=+131.081111989 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:22:59 crc kubenswrapper[4685]: I0128 12:22:59.565015 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" event={"ID":"edb4a604-6b03-48d4-b5ab-09e266b5eef8","Type":"ContainerStarted","Data":"e4554348b67369a8ade46f5a14ca7a0857f43281640faf12fe425e56d0d2f443"}
Jan 28 12:22:59 crc kubenswrapper[4685]: I0128 12:22:59.567598 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-54gh9" event={"ID":"79a36fbf-eb6c-442f-b6f0-d4a5f7435dde","Type":"ContainerStarted","Data":"046d8b8d71a37120bd693016fed417e65b4f51ad4deb4d0f827b0dca8a2e2f94"}
Jan 28 12:22:59 crc kubenswrapper[4685]: I0128 12:22:59.571401 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" event={"ID":"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45","Type":"ContainerStarted","Data":"876dd25e6aeaa9326b7f5a32cb96b37207281d17b0db25635e63d4e3a3e754c6"}
Jan 28 12:22:59 crc kubenswrapper[4685]: I0128 12:22:59.573677 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" event={"ID":"75a32638-e479-4025-abd5-d00533347443","Type":"ContainerStarted","Data":"07367558624a9aa39e4e0990157c1cb6fa0dd309edd0810c3a29316d744b2e92"}
Jan 28 12:22:59 crc kubenswrapper[4685]: I0128 12:22:59.576843 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-z2lzl" event={"ID":"08663495-9331-4b6f-b82a-67b308a9afa3","Type":"ContainerStarted","Data":"d15bfbef18e61ead4ca9beedbf5d6706f70f4ef3b784559ba50e1248f37597ed"}
Jan 28 12:22:59 crc kubenswrapper[4685]: I0128 12:22:59.593851 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:22:59 crc kubenswrapper[4685]: E0128 12:22:59.594313 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:00.094262166 +0000 UTC m=+131.181676041 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:22:59 crc kubenswrapper[4685]: I0128 12:22:59.695000 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:22:59 crc kubenswrapper[4685]: E0128 12:22:59.695545 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:00.195517139 +0000 UTC m=+131.282931044 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:22:59 crc kubenswrapper[4685]: I0128 12:22:59.795703 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:22:59 crc kubenswrapper[4685]: E0128 12:22:59.795949 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:00.295900666 +0000 UTC m=+131.383314501 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:00 crc kubenswrapper[4685]: I0128 12:23:00.591951 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-6rxh9" event={"ID":"31a31a5b-65f9-4997-b083-16382c0f6c11","Type":"ContainerStarted","Data":"f1f489a63e43f5af3d9ca79fb03a9ed79a879234346f2721f015fd838c7b5316"}
Jan 28 12:23:00 crc kubenswrapper[4685]: I0128 12:23:00.594942 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-psh57" event={"ID":"91aac1a9-a4dd-4668-af62-e76501c860ac","Type":"ContainerStarted","Data":"d9d504dbab1d134cb8aa0d4a7b068970094940eeb311a403c776e86d379dc770"}
Jan 28 12:23:00 crc kubenswrapper[4685]: I0128 12:23:00.597196 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4x7zt" event={"ID":"d97a0072-f1ae-4674-8ad6-aab9c60155f3","Type":"ContainerStarted","Data":"7a4c3a79646b8cea73a6ab92432c405a2ffbe02b30cfce22330ce373ef8cbad7"}
Jan 28 12:23:00 crc kubenswrapper[4685]: I0128 12:23:00.599390 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv" event={"ID":"8eb33408-9530-4b30-a357-9ecd1a094606","Type":"ContainerStarted","Data":"5c394b37c4041519a536641586cc1c1c73fb7c1adb92c35f52e35a990c833854"}
Jan 28 12:23:00 crc kubenswrapper[4685]: I0128 12:23:00.602020 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" event={"ID":"6d7a1a22-14a6-419e-b4f1-ebf636f248f9","Type":"ContainerStarted","Data":"23e97a6f06efe0c2c8722fcc20556f395c12b349a256f9f59cd25aa343c8fd23"}
Jan 28 12:23:00 crc kubenswrapper[4685]: I0128 12:23:00.604334 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xh8vq" event={"ID":"ea3bd48a-bb7b-4642-a8e5-5ad2e71d31f9","Type":"ContainerStarted","Data":"20f130a95975ed3e32e6e9e5d9d9eca04e65efb3ce4487b33e510b8e9d7af666"}
Jan 28 12:23:00 crc kubenswrapper[4685]: I0128 12:23:00.607236 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-gjp8v" event={"ID":"25efaeca-af03-4a53-9955-993273376049","Type":"ContainerStarted","Data":"eaf3416afd9a0ada6f0d9167889fb611c3e18a33fb5931dc9538a4d7a823eb9f"}
Jan 28 12:23:00 crc kubenswrapper[4685]: I0128 12:23:00.610070 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" event={"ID":"9df99517-c3a0-4fde-a93f-24b84a33615a","Type":"ContainerStarted","Data":"dd695dd9704b058ae03d21aeb64e92e3823cea593b70a04426c9af7fce743088"}
Jan 28 12:23:00 crc kubenswrapper[4685]: I0128 12:23:00.612494 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-lr8cn" event={"ID":"2143d413-3556-4232-a3d6-145f94d98606","Type":"ContainerStarted","Data":"63d4279566342f94dd6afdad6e2312bc35942ee2e55d9c0cada55fbb362d3de4"}
Jan 28 12:23:00 crc kubenswrapper[4685]: I0128 12:23:00.615804 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jbtd9" event={"ID":"0aa6b49a-8078-44f4-b1a9-2542d5bad461","Type":"ContainerStarted","Data":"a19cc0691543a9561ca9cb11f863d04005f966847556e2328724e6f08273f7f4"}
Jan 28 12:23:00 crc kubenswrapper[4685]: I0128 12:23:00.617700 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:00 crc kubenswrapper[4685]: E0128 12:23:00.618485 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:01.118458929 +0000 UTC m=+132.205872804 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:00 crc kubenswrapper[4685]: I0128 12:23:00.618780 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mwwhj" event={"ID":"45dd6f4b-7432-459c-ba00-b849e384eaae","Type":"ContainerStarted","Data":"d34218f46337287e38c34362258700ca77c461a669a358d27c8ca390b47835e9"}
Jan 28 12:23:00 crc kubenswrapper[4685]: I0128 12:23:00.719324 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:00 crc kubenswrapper[4685]: E0128 12:23:00.719541 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:01.219514317 +0000 UTC m=+132.306928172 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:00 crc kubenswrapper[4685]: I0128 12:23:00.719892 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:00 crc kubenswrapper[4685]: E0128 12:23:00.720340 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:01.220326102 +0000 UTC m=+132.307739957 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:00 crc kubenswrapper[4685]: I0128 12:23:00.784232 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-xvw2d"
Jan 28 12:23:00 crc kubenswrapper[4685]: I0128 12:23:00.792785 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:00 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:00 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:00 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:00 crc kubenswrapper[4685]: I0128 12:23:00.793094 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:00 crc kubenswrapper[4685]: I0128 12:23:00.820667 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:00 crc kubenswrapper[4685]: E0128 12:23:00.820992 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:01.320968316 +0000 UTC m=+132.408382151 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:00 crc kubenswrapper[4685]: I0128 12:23:00.906929 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-xvw2d" podStartSLOduration=96.906908358 podStartE2EDuration="1m36.906908358s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:00.905476693 +0000 UTC m=+131.992890528" watchObservedRunningTime="2026-01-28 12:23:00.906908358 +0000 UTC m=+131.994322193"
Jan 28 12:23:00 crc kubenswrapper[4685]: I0128 12:23:00.922255 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:00 crc kubenswrapper[4685]: E0128 12:23:00.922777 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:01.422754806 +0000 UTC m=+132.510168641 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.023907 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:01 crc kubenswrapper[4685]: E0128 12:23:01.024093 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:01.524064362 +0000 UTC m=+132.611478187 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.329756 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:01 crc kubenswrapper[4685]: E0128 12:23:01.330142 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:01.830125303 +0000 UTC m=+132.917539138 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.430857 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:01 crc kubenswrapper[4685]: E0128 12:23:01.431071 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:01.931040166 +0000 UTC m=+133.018453991 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.431483 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:01 crc kubenswrapper[4685]: E0128 12:23:01.431924 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:01.931903852 +0000 UTC m=+133.019317687 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.532609 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:01 crc kubenswrapper[4685]: E0128 12:23:01.532805 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:02.032770394 +0000 UTC m=+133.120184229 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.532872 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:01 crc kubenswrapper[4685]: E0128 12:23:01.533258 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:02.033250929 +0000 UTC m=+133.120664764 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.631954 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6z8gs" event={"ID":"421ae539-a9a0-4a76-be82-23ea497077ad","Type":"ContainerStarted","Data":"c47b6a30f4a587948311b302b43fb6666d38b8fbbf8b9b177b366983f116ffec"} Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.634485 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:01 crc kubenswrapper[4685]: E0128 12:23:01.634784 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:02.134740109 +0000 UTC m=+133.222153994 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.635095 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:01 crc kubenswrapper[4685]: E0128 12:23:01.635791 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:02.135772981 +0000 UTC m=+133.223186856 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.638099 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-xcd5g" event={"ID":"a7ad2819-235d-4f6d-b731-6f8e31db0b13","Type":"ContainerStarted","Data":"96b97ec1910b0e0ff384962b97b8c9bf76ca3742f14c3c68544b60060e24aa7f"} Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.647896 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hbxst" event={"ID":"cc7e3a89-0688-4bda-a76c-1b03e21b8419","Type":"ContainerStarted","Data":"a2ae61b917b87838ff1da6c71ffb327b05cbf2ebe29a7ea1d1a8166bfab83d2c"} Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.650122 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-dd6rs" event={"ID":"df73faab-d681-4ffe-b550-331fa336bc10","Type":"ContainerStarted","Data":"519e7d8c9228f9d16472f14715cf5461956c6ee1bb665c3eac93db662d10fc6f"} Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.652452 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq" event={"ID":"82c02f94-72c8-43fd-b0f0-7c644d696b61","Type":"ContainerStarted","Data":"e5572c74499f132b234e4cf8852fdd9f6617fd40077bf5d7fe444089ce634563"} Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.655432 4685 generic.go:334] "Generic (PLEG): container finished" podID="75a32638-e479-4025-abd5-d00533347443" containerID="07367558624a9aa39e4e0990157c1cb6fa0dd309edd0810c3a29316d744b2e92" exitCode=0 Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.655547 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" event={"ID":"75a32638-e479-4025-abd5-d00533347443","Type":"ContainerDied","Data":"07367558624a9aa39e4e0990157c1cb6fa0dd309edd0810c3a29316d744b2e92"} Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.662862 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk" event={"ID":"3fdafa55-db38-4041-bb34-f319905ad733","Type":"ContainerStarted","Data":"77bda4d97ef4e59d919beb80e4ffb40b2bfde6e053ea286e78028ad80ac4f05f"} Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.664969 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-slprg" event={"ID":"ee856066-85d4-41a5-86eb-9ab5e01ca0c6","Type":"ContainerStarted","Data":"bc6c8a1c695058d04b94d75a5b005cbc4501eb69fbfec5eab429636cd770bc3a"} Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.670271 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp" event={"ID":"af98b7ab-bf20-4cb2-85ec-1eb757b8db45","Type":"ContainerStarted","Data":"28d3b9b81b7bb11d6a5db92edaa1dd19a49f6421ca114b44ad78c4268d527932"} Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.671616 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qfcjm" event={"ID":"0353bfa2-54e9-4f75-ab25-ed65d14a9ab7","Type":"ContainerStarted","Data":"8f44d043f864a14046f0b013156738c192cc8085dffbad9ba3df474f51f19bc4"} Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.673830 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-922md" event={"ID":"f09051b7-ad38-4a84-89d7-ad84fd51fa1c","Type":"ContainerStarted","Data":"513e97027818be526337cdf847196cec1748f1a123a073e5ff5f12dc3704ff88"} Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.676947 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" event={"ID":"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9","Type":"ContainerStarted","Data":"47d4fc17fafc5bb4cf4a1c5dd6e3424713f3636582b76ab115e0d82da1530148"} Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.678746 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-7bmls" event={"ID":"02b54db7-da24-4fc2-88e2-cac233ca6323","Type":"ContainerStarted","Data":"1b1398cab1f6026ddcd75ccaa4544ffce00d87c451a1f57b113a325d3d2128be"} Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.680413 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp" event={"ID":"384b638e-e264-4492-8250-95540a24916c","Type":"ContainerStarted","Data":"baaf64ffda2228af339dc3dba36961ff66977e738ed6a60de82f094e83c09eab"} Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.681020 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-54gh9" Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.682962 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.683007 4685 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.735082 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-54gh9" podStartSLOduration=97.735044404 podStartE2EDuration="1m37.735044404s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:01.730107441 +0000 UTC m=+132.817521276" watchObservedRunningTime="2026-01-28 12:23:01.735044404 +0000 UTC m=+132.822458279" Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.736619 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:01 crc kubenswrapper[4685]: E0128 12:23:01.736889 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:02.23685687 +0000 UTC m=+133.324270745 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.736998 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:01 crc kubenswrapper[4685]: E0128 12:23:01.737674 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:02.237657184 +0000 UTC m=+133.325071049 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.789962 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 12:23:01 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld Jan 28 12:23:01 crc kubenswrapper[4685]: [+]process-running ok Jan 28 12:23:01 crc kubenswrapper[4685]: healthz check failed Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.790058 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.838046 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:01 crc kubenswrapper[4685]: E0128 12:23:01.838211 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:02.338186585 +0000 UTC m=+133.425600430 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.838660 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:01 crc kubenswrapper[4685]: E0128 12:23:01.839454 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:02.339429244 +0000 UTC m=+133.426843179 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.939572 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:01 crc kubenswrapper[4685]: E0128 12:23:01.939826 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:02.43978734 +0000 UTC m=+133.527201165 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:01 crc kubenswrapper[4685]: I0128 12:23:01.940087 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:01 crc kubenswrapper[4685]: E0128 12:23:01.940767 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:02.440756189 +0000 UTC m=+133.528170024 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.041423 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:02 crc kubenswrapper[4685]: E0128 12:23:02.042103 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:02.542067695 +0000 UTC m=+133.629481530 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.042443 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:02 crc kubenswrapper[4685]: E0128 12:23:02.042934 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:02.542904101 +0000 UTC m=+133.630317986 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.144096 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:02 crc kubenswrapper[4685]: E0128 12:23:02.144617 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:02.644594677 +0000 UTC m=+133.732008533 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.245457 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:02 crc kubenswrapper[4685]: E0128 12:23:02.245891 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:02.745870362 +0000 UTC m=+133.833284197 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.346772 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:02 crc kubenswrapper[4685]: E0128 12:23:02.347256 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:02.847233348 +0000 UTC m=+133.934647183 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.448120 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:02 crc kubenswrapper[4685]: E0128 12:23:02.448543 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:02.948524633 +0000 UTC m=+134.035938468 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.548944 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:02 crc kubenswrapper[4685]: E0128 12:23:02.549098 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:03.049074165 +0000 UTC m=+134.136488000 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.549415 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:02 crc kubenswrapper[4685]: E0128 12:23:02.549827 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:03.049816048 +0000 UTC m=+134.137229883 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.650313 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:02 crc kubenswrapper[4685]: E0128 12:23:02.650510 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:03.150479343 +0000 UTC m=+134.237893178 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.650601 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:02 crc kubenswrapper[4685]: E0128 12:23:02.650990 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:03.150983029 +0000 UTC m=+134.238396864 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.688435 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-pvrr6" event={"ID":"4aea2580-1e20-4df6-b328-c1b99d986330","Type":"ContainerStarted","Data":"2809cd9ac99cab425d8d8beddffa21e775d9580c2b931176808abcf3b7151bbc"} Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.690198 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-kfqrq" event={"ID":"80b3c037-4d1a-4be7-81d6-1c33f1801bd6","Type":"ContainerStarted","Data":"a7c1e2b5310058464cb32912f0a5413153145dc67f398a2c8cce44d2c4296c49"} Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.695971 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" event={"ID":"91fd9e56-9836-4427-b58e-9c0742895c7a","Type":"ContainerStarted","Data":"1b74a283622c2ebcb056d662da66d602d7c689d34dd83561eef92054ee9cb02e"} Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.697618 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-s4474" event={"ID":"b65cc4af-8099-4f34-a93e-9ef9ae97996b","Type":"ContainerStarted","Data":"b298c302def05ef039386e1f6b1daab05385c7dd822669572f9425dbe368418f"} Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.699230 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" event={"ID":"4b002972-6a89-4a6a-9839-a69e5e9ff3e5","Type":"ContainerStarted","Data":"f9eb75d5cc28e59132199f58bf9846bcfee46d5efeb39f7e0e53d96bd43c203d"} Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.700746 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vcrmr" event={"ID":"39f4fa09-765f-45c8-8206-290bf19fab29","Type":"ContainerStarted","Data":"8d1317e5040395b655eb8da9e77d7fce21a0479dc5503d11e96dec75179ee0f5"} Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.702043 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-ftmbf" event={"ID":"b3a33bb2-4fb3-4988-8b43-62a9bdd2e659","Type":"ContainerStarted","Data":"f1884fc66db9750bd1c1839f23d7e1c9445cb0d42eb3f8cd5ae52c8a70c6d5f6"} Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.703670 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-lw2hz" event={"ID":"021e1574-af09-49bb-af67-1fc874bb0a06","Type":"ContainerStarted","Data":"7f894ee4d57f82ec32c0f8fc45fb6834783ae3c71a9010236873f542d80e50f2"} Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.706238 4685 generic.go:334] "Generic (PLEG): container finished" podID="efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9" containerID="47d4fc17fafc5bb4cf4a1c5dd6e3424713f3636582b76ab115e0d82da1530148" exitCode=0 Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.706310 4685 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" event={"ID":"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9","Type":"ContainerDied","Data":"47d4fc17fafc5bb4cf4a1c5dd6e3424713f3636582b76ab115e0d82da1530148"} Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.708406 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk" event={"ID":"eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20","Type":"ContainerStarted","Data":"96f1b85cbd699262824d4e5950eca7d4a2cf304f856bfb37aa97669068148083"} Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.709225 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.710027 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.710106 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.715297 4685 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-qpc29 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body= Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.715435 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.751697 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:02 crc kubenswrapper[4685]: E0128 12:23:02.751912 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:03.251884531 +0000 UTC m=+134.339298386 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.752271 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:02 crc kubenswrapper[4685]: E0128 12:23:02.752683 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:03.252668105 +0000 UTC m=+134.340081960 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.773079 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" podStartSLOduration=98.773043994 podStartE2EDuration="1m38.773043994s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:02.769411292 +0000 UTC m=+133.856825197" watchObservedRunningTime="2026-01-28 12:23:02.773043994 +0000 UTC m=+133.860457859" Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.773483 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" podStartSLOduration=98.773473727 podStartE2EDuration="1m38.773473727s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:02.742282065 +0000 UTC m=+133.829695900" watchObservedRunningTime="2026-01-28 12:23:02.773473727 +0000 UTC m=+133.860887592" Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.791668 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 12:23:02 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld Jan 28 12:23:02 crc kubenswrapper[4685]: [+]process-running ok Jan 28 12:23:02 crc kubenswrapper[4685]: healthz check failed Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.791752 4685 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.819344 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4x7zt" podStartSLOduration=98.819300451 podStartE2EDuration="1m38.819300451s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:02.817886437 +0000 UTC m=+133.905300282" watchObservedRunningTime="2026-01-28 12:23:02.819300451 +0000 UTC m=+133.906714296" Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.845353 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-z2lzl" podStartSLOduration=98.845330404 podStartE2EDuration="1m38.845330404s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:02.844426246 +0000 UTC m=+133.931840091" watchObservedRunningTime="2026-01-28 12:23:02.845330404 +0000 UTC m=+133.932744239" Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.853314 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:02 crc kubenswrapper[4685]: E0128 12:23:02.854567 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:03.354526568 +0000 UTC m=+134.441940403 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.872011 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jbtd9" podStartSLOduration=98.871988896 podStartE2EDuration="1m38.871988896s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:02.871630965 +0000 UTC m=+133.959044810" watchObservedRunningTime="2026-01-28 12:23:02.871988896 +0000 UTC m=+133.959402741" Jan 28 12:23:02 crc kubenswrapper[4685]: I0128 12:23:02.955293 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:02 crc kubenswrapper[4685]: E0128 12:23:02.955697 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:03.455680208 +0000 UTC m=+134.543094043 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.056618 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:03 crc kubenswrapper[4685]: E0128 12:23:03.056818 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:03.556787127 +0000 UTC m=+134.644200962 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.057008 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:03 crc kubenswrapper[4685]: E0128 12:23:03.057378 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:03.557371735 +0000 UTC m=+134.644785570 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.158656 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:03 crc kubenswrapper[4685]: E0128 12:23:03.158774 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:03.658749202 +0000 UTC m=+134.746163037 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.159062 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:03 crc kubenswrapper[4685]: E0128 12:23:03.159380 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:03.659372461 +0000 UTC m=+134.746786296 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.260783 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:03 crc kubenswrapper[4685]: E0128 12:23:03.261264 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:03.761242734 +0000 UTC m=+134.848656579 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.363269 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:03 crc kubenswrapper[4685]: E0128 12:23:03.363987 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:03.863969863 +0000 UTC m=+134.951383698 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.464995 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:03 crc kubenswrapper[4685]: E0128 12:23:03.465375 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:03.965277058 +0000 UTC m=+135.052690913 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.566693 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:03 crc kubenswrapper[4685]: E0128 12:23:03.567033 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:04.067017955 +0000 UTC m=+135.154431790 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.668448 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:03 crc kubenswrapper[4685]: E0128 12:23:03.668966 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:04.168924299 +0000 UTC m=+135.256338284 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.714202 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv"
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.714687 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29"
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.714889 4685 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-c5jnv container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" start-of-body=
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.714964 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv" podUID="8eb33408-9530-4b30-a357-9ecd1a094606" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused"
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.734401 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" podStartSLOduration=99.734378628 podStartE2EDuration="1m39.734378628s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:02.898292528 +0000 UTC m=+133.985706373" watchObservedRunningTime="2026-01-28 12:23:03.734378628 +0000 UTC m=+134.821792473"
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.736558 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv" podStartSLOduration=99.736547795 podStartE2EDuration="1m39.736547795s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:03.732977855 +0000 UTC m=+134.820391690" watchObservedRunningTime="2026-01-28 12:23:03.736547795 +0000 UTC m=+134.823961640"
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.770374 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:03 crc kubenswrapper[4685]: E0128 12:23:03.770896 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:04.270868564 +0000 UTC m=+135.358282439 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
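The retry cluster above is a single failure mode repeated: every MountDevice and TearDown attempt aborts because kubelet cannot find "kubevirt.io.hostpath-provisioner" among the CSI plugins registered on this node; the csi-hostpathplugin pod has not yet announced itself over the kubelet plugin-registration socket. A minimal sketch (not taken from this log) of how one might check what kubelet has registered, by reading the node's CSINode object with client-go; the kubeconfig path and the node name "crc" are assumptions:

    // csinode-drivers: list the CSI drivers kubelet has registered on a node,
    // as mirrored into the CSINode object. Illustrative sketch only.
    package main

    import (
    	"context"
    	"flag"
    	"fmt"
    	"log"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	kubeconfig := flag.String("kubeconfig", "/root/.kube/config", "kubeconfig path (assumed)")
    	node := flag.String("node", "crc", "node name (assumed from the log hostname)")
    	flag.Parse()

    	cfg, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
    	if err != nil {
    		log.Fatal(err)
    	}
    	cs, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		log.Fatal(err)
    	}

    	// A driver absent from this list is exactly what produces the log's
    	// "driver name ... not found in the list of registered CSI drivers".
    	cn, err := cs.StorageV1().CSINodes().Get(context.TODO(), *node, metav1.GetOptions{})
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, d := range cn.Spec.Drivers {
    		fmt.Printf("%s (nodeID=%s)\n", d.Name, d.NodeID)
    	}
    }

Once the driver pod registers (the ContainerStarted event for csi-hostpathplugin-xvxqg appears further down), kubevirt.io.hostpath-provisioner would be expected to show up here and the retries to succeed.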
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.776784 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-68qg7" podStartSLOduration=99.776752875 podStartE2EDuration="1m39.776752875s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:03.751227048 +0000 UTC m=+134.838640883" watchObservedRunningTime="2026-01-28 12:23:03.776752875 +0000 UTC m=+134.864166740"
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.777620 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-psh57" podStartSLOduration=99.777611122 podStartE2EDuration="1m39.777611122s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:03.775512187 +0000 UTC m=+134.862926022" watchObservedRunningTime="2026-01-28 12:23:03.777611122 +0000 UTC m=+134.865024977"
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.787527 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:03 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:03 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:03 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.787595 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.800239 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mwwhj" podStartSLOduration=99.800215129 podStartE2EDuration="1m39.800215129s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:03.798377592 +0000 UTC m=+134.885791477" watchObservedRunningTime="2026-01-28 12:23:03.800215129 +0000 UTC m=+134.887628964"
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.819665 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-6z8gs" podStartSLOduration=99.819643318 podStartE2EDuration="1m39.819643318s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:03.81709683 +0000 UTC m=+134.904510665" watchObservedRunningTime="2026-01-28 12:23:03.819643318 +0000 UTC m=+134.907057153"
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.840656 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-xh8vq" podStartSLOduration=100.840630256 podStartE2EDuration="1m40.840630256s" podCreationTimestamp="2026-01-28 12:21:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:03.838028686 +0000 UTC m=+134.925442541" watchObservedRunningTime="2026-01-28 12:23:03.840630256 +0000 UTC m=+134.928044091"
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.858291 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp" podStartSLOduration=99.85827125 podStartE2EDuration="1m39.85827125s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:03.857993832 +0000 UTC m=+134.945407667" watchObservedRunningTime="2026-01-28 12:23:03.85827125 +0000 UTC m=+134.945685095"
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.871427 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:03 crc kubenswrapper[4685]: E0128 12:23:03.873604 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:04.373584462 +0000 UTC m=+135.460998297 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.877027 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-lr8cn" podStartSLOduration=100.877009378 podStartE2EDuration="1m40.877009378s" podCreationTimestamp="2026-01-28 12:21:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:03.875628926 +0000 UTC m=+134.963042761" watchObservedRunningTime="2026-01-28 12:23:03.877009378 +0000 UTC m=+134.964423213"
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.891766 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk" podStartSLOduration=99.891703551 podStartE2EDuration="1m39.891703551s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:03.88972836 +0000 UTC m=+134.977142195" watchObservedRunningTime="2026-01-28 12:23:03.891703551 +0000 UTC m=+134.979117436"
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.931244 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-gjp8v" podStartSLOduration=11.9312161 podStartE2EDuration="11.9312161s" podCreationTimestamp="2026-01-28 12:22:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:03.929730974 +0000 UTC m=+135.017144809" watchObservedRunningTime="2026-01-28 12:23:03.9312161 +0000 UTC m=+135.018629945"
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.934288 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-7bmls" podStartSLOduration=98.934278355 podStartE2EDuration="1m38.934278355s" podCreationTimestamp="2026-01-28 12:21:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:03.910867343 +0000 UTC m=+134.998281198" watchObservedRunningTime="2026-01-28 12:23:03.934278355 +0000 UTC m=+135.021692200"
Jan 28 12:23:03 crc kubenswrapper[4685]: I0128 12:23:03.973467 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:03 crc kubenswrapper[4685]: E0128 12:23:03.973876 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:04.473847655 +0000 UTC m=+135.561261480 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:04 crc kubenswrapper[4685]: E0128 12:23:04.074365 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:04.574330995 +0000 UTC m=+135.661744830 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.074410 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.075377 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:04 crc kubenswrapper[4685]: E0128 12:23:04.075857 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:04.575829021 +0000 UTC m=+135.663242856 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
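Each failed operation above is parked by nestedpendingoperations until its "No retries permitted until" deadline; in this window the printed delay stays at durationBeforeRetry 500ms. A minimal sketch of the general backoff shape for operations that keep failing (start at 500ms, double per consecutive failure, up to a cap); the 2m2s cap matches kubelet's exponential-backoff defaults to the best of my knowledge and should be treated as an assumption:

    // Illustrative sketch of the retry-delay pattern behind
    // "durationBeforeRetry"; constants are assumptions, not from this log.
    package main

    import (
    	"fmt"
    	"time"
    )

    type backoff struct{ last time.Duration }

    func (b *backoff) next() time.Duration {
    	const (
    		initial  = 500 * time.Millisecond // first retry delay, as logged here
    		maxDelay = 2*time.Minute + 2*time.Second
    	)
    	if b.last == 0 {
    		b.last = initial
    	} else {
    		b.last *= 2
    		if b.last > maxDelay {
    			b.last = maxDelay
    		}
    	}
    	return b.last
    }

    func main() {
    	var b backoff
    	for i := 0; i < 6; i++ {
    		fmt.Println(b.next()) // 500ms 1s 2s 4s 8s 16s
    	}
    }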
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.176836 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:04 crc kubenswrapper[4685]: E0128 12:23:04.177013 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:04.676991092 +0000 UTC m=+135.764404927 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.177231 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:04 crc kubenswrapper[4685]: E0128 12:23:04.177508 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:04.677498638 +0000 UTC m=+135.764912473 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.278970 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:04 crc kubenswrapper[4685]: E0128 12:23:04.279157 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:04.779121432 +0000 UTC m=+135.866535267 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.279618 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:04 crc kubenswrapper[4685]: E0128 12:23:04.280059 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:04.780050211 +0000 UTC m=+135.867464056 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.381633 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:04 crc kubenswrapper[4685]: E0128 12:23:04.381859 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:04.881827521 +0000 UTC m=+135.969241356 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.381991 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:04 crc kubenswrapper[4685]: E0128 12:23:04.382309 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:04.882301065 +0000 UTC m=+135.969714900 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.440008 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-lr8cn"
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.441436 4685 patch_prober.go:28] interesting pod/console-operator-58897d9998-lr8cn container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.15:8443/readyz\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body=
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.441444 4685 patch_prober.go:28] interesting pod/console-operator-58897d9998-lr8cn container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.15:8443/healthz\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body=
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.441490 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-lr8cn" podUID="2143d413-3556-4232-a3d6-145f94d98606" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.15:8443/readyz\": dial tcp 10.217.0.15:8443: connect: connection refused"
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.441514 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-58897d9998-lr8cn" podUID="2143d413-3556-4232-a3d6-145f94d98606" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.15:8443/healthz\": dial tcp 10.217.0.15:8443: connect: connection refused"
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.483531 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:04 crc kubenswrapper[4685]: E0128 12:23:04.484130 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:04.984084465 +0000 UTC m=+136.071498550 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
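The probe entries interleaved with the mount retries all fail the same way: the kubelet prober dials the pod IP and port and gets connection refused because the container's server is not listening yet. A rough stand-in for one such HTTPS readiness probe; the URL is taken from the console-operator entries above, while the 1s timeout and the skipped certificate verification reflect how kubelet performs httpGet probes to the best of my knowledge:

    // Sketch of an httpGet-style readiness probe. Kubelet treats a 2xx/3xx
    // status as success and any dial error as failure.
    package main

    import (
    	"crypto/tls"
    	"fmt"
    	"net/http"
    	"time"
    )

    func main() {
    	client := &http.Client{
    		Timeout: 1 * time.Second, // assumed probe timeout
    		Transport: &http.Transport{
    			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
    		},
    	}
    	resp, err := client.Get("https://10.217.0.15:8443/readyz")
    	if err != nil {
    		// While the container is starting this is the log's
    		// "dial tcp 10.217.0.15:8443: connect: connection refused".
    		fmt.Println("probe failure:", err)
    		return
    	}
    	defer resp.Body.Close()
    	fmt.Println("probe status:", resp.StatusCode)
    }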
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.487261 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw"
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.488710 4685 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-7pxnw container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:8443/healthz\": dial tcp 10.217.0.36:8443: connect: connection refused" start-of-body=
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.488786 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" podUID="edb4a604-6b03-48d4-b5ab-09e266b5eef8" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.36:8443/healthz\": dial tcp 10.217.0.36:8443: connect: connection refused"
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.490189 4685 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-7pxnw container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:8443/healthz\": dial tcp 10.217.0.36:8443: connect: connection refused" start-of-body=
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.490321 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" podUID="edb4a604-6b03-48d4-b5ab-09e266b5eef8" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.36:8443/healthz\": dial tcp 10.217.0.36:8443: connect: connection refused"
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.584899 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:04 crc kubenswrapper[4685]: E0128 12:23:04.585344 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:05.085323038 +0000 UTC m=+136.172736953 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.589564 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body=
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.589615 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused"
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.589730 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body=
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.589855 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused"
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.647745 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r"
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.648941 4685 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-2966r container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body=
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.649003 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" podUID="6d7a1a22-14a6-419e-b4f1-ebf636f248f9" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused"
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.649688 4685 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-2966r container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body=
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.649713 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" podUID="6d7a1a22-14a6-419e-b4f1-ebf636f248f9" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused"
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.660979 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-z2lzl"
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.661032 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-z2lzl"
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.662850 4685 patch_prober.go:28] interesting pod/console-f9d7485db-z2lzl container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body=
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.662902 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-z2lzl" podUID="08663495-9331-4b6f-b82a-67b308a9afa3" containerName="console" probeResult="failure" output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused"
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.686201 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:04 crc kubenswrapper[4685]: E0128 12:23:04.686342 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:05.186313543 +0000 UTC m=+136.273727378 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.686598 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:04 crc kubenswrapper[4685]: E0128 12:23:04.686977 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:05.186967894 +0000 UTC m=+136.274381729 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.718578 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" event={"ID":"54c1da50-0209-4285-bc15-91427e15241d","Type":"ContainerStarted","Data":"5bb3ed484dda4ea38c7771f686f269108e085101ed2188e4e84d30cccb2c892d"}
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.719786 4685 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-c5jnv container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" start-of-body=
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.719798 4685 patch_prober.go:28] interesting pod/console-operator-58897d9998-lr8cn container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.15:8443/readyz\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body=
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.719844 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv" podUID="8eb33408-9530-4b30-a357-9ecd1a094606" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused"
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.719877 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-lr8cn" podUID="2143d413-3556-4232-a3d6-145f94d98606" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.15:8443/readyz\": dial tcp 10.217.0.15:8443: connect: connection refused"
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.736068 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-dd6rs" podStartSLOduration=101.736040317 podStartE2EDuration="1m41.736040317s" podCreationTimestamp="2026-01-28 12:21:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:04.734324305 +0000 UTC m=+135.821738140" watchObservedRunningTime="2026-01-28 12:23:04.736040317 +0000 UTC m=+135.823454152"
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.737247 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hbxst" podStartSLOduration=100.737239874 podStartE2EDuration="1m40.737239874s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:03.975753004 +0000 UTC m=+135.063166839" watchObservedRunningTime="2026-01-28 12:23:04.737239874 +0000 UTC m=+135.824653709"
Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.784275 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-xvw2d"
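The pod_startup_latency_tracker entries are plain timestamp arithmetic: as the values above suggest, podStartSLOduration equals watchObservedRunningTime minus podCreationTimestamp (pull time would be excluded if any image pulls had been observed; here the pulling timestamps are zero). A quick check against the authentication-operator entry, using its values verbatim:

    // Verifies the reported podStartSLOduration=101.736040317 from the
    // authentication-operator entry above. The layout string is Go's
    // reference time; values are copied from the log.
    package main

    import (
    	"fmt"
    	"log"
    	"time"
    )

    func main() {
    	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
    	created, err := time.Parse(layout, "2026-01-28 12:21:23 +0000 UTC")
    	if err != nil {
    		log.Fatal(err)
    	}
    	running, err := time.Parse(layout, "2026-01-28 12:23:04.736040317 +0000 UTC")
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("%.9f\n", running.Sub(created).Seconds()) // 101.736040317
    }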
kubenswrapper[4685]: I0128 12:23:04.784275 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-xvw2d" Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.787150 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 12:23:04 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld Jan 28 12:23:04 crc kubenswrapper[4685]: [+]process-running ok Jan 28 12:23:04 crc kubenswrapper[4685]: healthz check failed Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.787298 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.787487 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:04 crc kubenswrapper[4685]: E0128 12:23:04.787763 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:05.287740892 +0000 UTC m=+136.375154747 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.787985 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:04 crc kubenswrapper[4685]: E0128 12:23:04.789226 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:05.289207438 +0000 UTC m=+136.376621273 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.879122 4685 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-c5jnv container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" start-of-body= Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.879222 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv" podUID="8eb33408-9530-4b30-a357-9ecd1a094606" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.879470 4685 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-c5jnv container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" start-of-body= Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.879546 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv" podUID="8eb33408-9530-4b30-a357-9ecd1a094606" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.888678 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:04 crc kubenswrapper[4685]: E0128 12:23:04.889425 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:05.389387448 +0000 UTC m=+136.476801333 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:04 crc kubenswrapper[4685]: I0128 12:23:04.990504 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:04 crc kubenswrapper[4685]: E0128 12:23:04.991651 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:05.491365764 +0000 UTC m=+136.578779639 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.056226 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.057781 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.062548 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.062682 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.075358 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.092005 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:05 crc kubenswrapper[4685]: E0128 12:23:05.092319 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:05.592282057 +0000 UTC m=+136.679695892 (durationBeforeRetry 500ms). 
Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.092771 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:05 crc kubenswrapper[4685]: E0128 12:23:05.093264 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:05.593247297 +0000 UTC m=+136.680661202 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.147820 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk"
Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.150527 4685 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-6wksk container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" start-of-body=
Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.150741 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk" podUID="3fdafa55-db38-4041-bb34-f319905ad733" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused"
Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.150551 4685 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-6wksk container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" start-of-body=
Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.150964 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk" podUID="3fdafa55-db38-4041-bb34-f319905ad733" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused"
Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.151448 4685 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-6wksk container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" start-of-body=
Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.151517 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk" podUID="3fdafa55-db38-4041-bb34-f319905ad733" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused"
Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.194662 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:05 crc kubenswrapper[4685]: E0128 12:23:05.194812 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:05.694779909 +0000 UTC m=+136.782193784 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.195358 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.195502 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/81539868-3464-4bfe-8a42-08c54ea1b0df-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"81539868-3464-4bfe-8a42-08c54ea1b0df\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.195613 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/81539868-3464-4bfe-8a42-08c54ea1b0df-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"81539868-3464-4bfe-8a42-08c54ea1b0df\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 28 12:23:05 crc kubenswrapper[4685]: E0128 12:23:05.195780 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:05.695760269 +0000 UTC m=+136.783174144 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.297462 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:05 crc kubenswrapper[4685]: E0128 12:23:05.297660 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:05.797611051 +0000 UTC m=+136.885024916 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.298454 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.298715 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/81539868-3464-4bfe-8a42-08c54ea1b0df-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"81539868-3464-4bfe-8a42-08c54ea1b0df\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.298878 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/81539868-3464-4bfe-8a42-08c54ea1b0df-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"81539868-3464-4bfe-8a42-08c54ea1b0df\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.298801 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/81539868-3464-4bfe-8a42-08c54ea1b0df-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"81539868-3464-4bfe-8a42-08c54ea1b0df\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 28 12:23:05 crc kubenswrapper[4685]: E0128 12:23:05.298916 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:05.79889642 +0000 UTC m=+136.886310365 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:05.79889642 +0000 UTC m=+136.886310365 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.326240 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/81539868-3464-4bfe-8a42-08c54ea1b0df-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"81539868-3464-4bfe-8a42-08c54ea1b0df\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.382251 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.401055 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:05 crc kubenswrapper[4685]: E0128 12:23:05.401730 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:05.901702612 +0000 UTC m=+136.989116487 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.503426 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:05 crc kubenswrapper[4685]: E0128 12:23:05.503942 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:06.003922275 +0000 UTC m=+137.091336130 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.589207 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.604112 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:05 crc kubenswrapper[4685]: E0128 12:23:05.604613 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:06.104595361 +0000 UTC m=+137.192009196 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.706198 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:05 crc kubenswrapper[4685]: E0128 12:23:05.706496 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:06.206483574 +0000 UTC m=+137.293897409 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.724862 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"81539868-3464-4bfe-8a42-08c54ea1b0df","Type":"ContainerStarted","Data":"f8efed0309bb0ffb9aa76b2ebbb47734ffc85da2af0bdc25cd9d17c8a2125435"} Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.786370 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 12:23:05 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld Jan 28 12:23:05 crc kubenswrapper[4685]: [+]process-running ok Jan 28 12:23:05 crc kubenswrapper[4685]: healthz check failed Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.786447 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 12:23:05 crc kubenswrapper[4685]: I0128 12:23:05.807858 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:05 crc kubenswrapper[4685]: E0128 12:23:05.808071 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:06.308040057 +0000 UTC m=+137.395453892 (durationBeforeRetry 500ms). 
Jan 28 12:23:06 crc kubenswrapper[4685]: E0128 12:23:06.728274 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:07.228224452 +0000 UTC m=+138.315638327 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:06 crc kubenswrapper[4685]: I0128 12:23:06.729213 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:06 crc kubenswrapper[4685]: E0128 12:23:06.729768 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:07.229741379 +0000 UTC m=+138.317155224 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:06 crc kubenswrapper[4685]: I0128 12:23:06.732518 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" event={"ID":"b22a7729-d10a-412c-8df1-30992ba607b0","Type":"ContainerStarted","Data":"c5bb49c9209233b4117653c1458b61e058e5c6a7cdfaa8f5e358061da7b32359"}
Jan 28 12:23:06 crc kubenswrapper[4685]: I0128 12:23:06.732827 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk"
Jan 28 12:23:06 crc kubenswrapper[4685]: I0128 12:23:06.735828 4685 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-h6gqk container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.42:5443/healthz\": dial tcp 10.217.0.42:5443: connect: connection refused" start-of-body=
Jan 28 12:23:06 crc kubenswrapper[4685]: I0128 12:23:06.735945 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk" podUID="eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.42:5443/healthz\": dial tcp 10.217.0.42:5443: connect: connection refused"
Jan 28 12:23:06 crc kubenswrapper[4685]: I0128 12:23:06.758307 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk" podStartSLOduration=102.7582863 podStartE2EDuration="1m42.7582863s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:06.758114564 +0000 UTC m=+137.845528409" watchObservedRunningTime="2026-01-28 12:23:06.7582863 +0000 UTC m=+137.845700135"
Jan 28 12:23:06 crc kubenswrapper[4685]: I0128 12:23:06.776645 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" podStartSLOduration=103.776623305 podStartE2EDuration="1m43.776623305s" podCreationTimestamp="2026-01-28 12:21:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:06.776191362 +0000 UTC m=+137.863605217" watchObservedRunningTime="2026-01-28 12:23:06.776623305 +0000 UTC m=+137.864037150"
Jan 28 12:23:06 crc kubenswrapper[4685]: I0128 12:23:06.787028 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:06 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:06 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:06 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:06 crc kubenswrapper[4685]: I0128 12:23:06.788536 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:06 crc kubenswrapper[4685]: I0128 12:23:06.800409 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-pvrr6" podStartSLOduration=101.800393509 podStartE2EDuration="1m41.800393509s" podCreationTimestamp="2026-01-28 12:21:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:06.79687879 +0000 UTC m=+137.884292625" watchObservedRunningTime="2026-01-28 12:23:06.800393509 +0000 UTC m=+137.887807354"
Jan 28 12:23:06 crc kubenswrapper[4685]: I0128 12:23:06.830944 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:06 crc kubenswrapper[4685]: E0128 12:23:06.831180 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:07.331136447 +0000 UTC m=+138.418550282 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:06 crc kubenswrapper[4685]: I0128 12:23:06.831256 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:06 crc kubenswrapper[4685]: E0128 12:23:06.831780 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:07.331755126 +0000 UTC m=+138.419168971 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:06 crc kubenswrapper[4685]: I0128 12:23:06.932427 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:06 crc kubenswrapper[4685]: E0128 12:23:06.932682 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:07.432638448 +0000 UTC m=+138.520052313 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:06 crc kubenswrapper[4685]: I0128 12:23:06.933025 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:06 crc kubenswrapper[4685]: E0128 12:23:06.933571 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:07.433542076 +0000 UTC m=+138.520955941 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:07 crc kubenswrapper[4685]: I0128 12:23:07.034235 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:07 crc kubenswrapper[4685]: E0128 12:23:07.034467 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:07.534436468 +0000 UTC m=+138.621850323 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:07 crc kubenswrapper[4685]: I0128 12:23:07.034761 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:07 crc kubenswrapper[4685]: E0128 12:23:07.035147 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:07.5351343 +0000 UTC m=+138.622548125 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:07 crc kubenswrapper[4685]: I0128 12:23:07.136368 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:07 crc kubenswrapper[4685]: E0128 12:23:07.136831 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:07.636738593 +0000 UTC m=+138.724152508 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:07 crc kubenswrapper[4685]: I0128 12:23:07.137338 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:07 crc kubenswrapper[4685]: E0128 12:23:07.138253 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:07.638223099 +0000 UTC m=+138.725636974 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:07 crc kubenswrapper[4685]: I0128 12:23:07.238818 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:07 crc kubenswrapper[4685]: E0128 12:23:07.239739 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:07.739693369 +0000 UTC m=+138.827107234 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:07 crc kubenswrapper[4685]: I0128 12:23:07.239917 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:07 crc kubenswrapper[4685]: E0128 12:23:07.240679 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:07.740655029 +0000 UTC m=+138.828068904 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:07 crc kubenswrapper[4685]: I0128 12:23:07.342365 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:07 crc kubenswrapper[4685]: E0128 12:23:07.342639 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:07.842597894 +0000 UTC m=+138.930011729 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:07 crc kubenswrapper[4685]: I0128 12:23:07.342908 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:07 crc kubenswrapper[4685]: E0128 12:23:07.343360 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:07.843352737 +0000 UTC m=+138.930766572 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:07 crc kubenswrapper[4685]: I0128 12:23:07.444466 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:07 crc kubenswrapper[4685]: E0128 12:23:07.444722 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:07.944682013 +0000 UTC m=+139.032095858 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
[12:23:07.445018 – 12:23:07.649118: the same UnmountVolume/MountVolume retry pairs for pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 repeat verbatim; only the timestamps and the 500 ms retry deadlines advance]
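The retry storm above has a single repeated cause: the kubelet cannot find kubevirt.io.hostpath-provisioner among the CSI drivers registered on this node, so both the unmount path (Unmounter.TearDownAt) and the mount path (attacher.MountDevice) fail before ever reaching the driver. The node's registration state is mirrored in its CSINode object, so one way to confirm what the kubelet actually has registered is to read that object. A minimal client-go sketch follows; the node name "crc" comes from the log prefix, and the kubeconfig path is an assumption, not something taken from this log.

```go
package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed kubeconfig location; adjust for the environment at hand.
	kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// The CSINode object mirrors the CSI drivers the node's kubelet has registered.
	// If kubevirt.io.hostpath-provisioner is absent here, the errors above follow.
	n, err := cs.StorageV1().CSINodes().Get(context.TODO(), "crc", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	for _, d := range n.Spec.Drivers {
		fmt.Printf("registered driver: %s (nodeID %s)\n", d.Name, d.NodeID)
	}
}
```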
Jan 28 12:23:07 crc kubenswrapper[4685]: I0128 12:23:07.739289 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-6rxh9" event={"ID":"31a31a5b-65f9-4997-b083-16382c0f6c11","Type":"ContainerStarted","Data":"c7e231ce33ca96bc24ccf918e906065a2deee80ecdda89876bac5f5d78d32022"}
Jan 28 12:23:07 crc kubenswrapper[4685]: I0128 12:23:07.740485 4685 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-h6gqk container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.42:5443/healthz\": dial tcp 10.217.0.42:5443: connect: connection refused" start-of-body=
Jan 28 12:23:07 crc kubenswrapper[4685]: I0128 12:23:07.740542 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk" podUID="eccf5b8d-9fb2-4d8d-81e3-08265c0bdb20" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.42:5443/healthz\": dial tcp 10.217.0.42:5443: connect: connection refused"
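The packageserver readiness failure above is a TCP-level refusal: nothing is listening on 10.217.0.42:5443 yet, so the probe fails before any HTTP exchange. A rough stand-in for what the prober does against an HTTPS endpoint is sketched below, assuming the kubelet's usual behavior of skipping certificate verification on HTTPS probes and its 1 s default timeout; the URL is taken from the log, everything else is illustrative.

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// probe is a minimal stand-in for an HTTPS readiness probe:
// GET the path, skip cert verification, treat 2xx/3xx as success.
func probe(url string) error {
	client := &http.Client{
		Timeout: time.Second, // kubelet's default probe timeout is 1s
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. "connect: connection refused", as in the log
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		// Mirrors the router entries below: "HTTP probe failed with statuscode: 500"
		return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := probe("https://10.217.0.42:5443/healthz"); err != nil {
		fmt.Println("probe failed:", err)
	}
}
```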
[12:23:07.749230 – 12:23:07.750036: another UnmountVolume/MountVolume retry pair for pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8, identical except for timestamps]
Jan 28 12:23:07 crc kubenswrapper[4685]: I0128 12:23:07.758912 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-vcrmr" podStartSLOduration=103.758892045 podStartE2EDuration="1m43.758892045s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:07.755251223 +0000 UTC m=+138.842665058" watchObservedRunningTime="2026-01-28 12:23:07.758892045 +0000 UTC m=+138.846305910"
Jan 28 12:23:07 crc kubenswrapper[4685]: I0128 12:23:07.790362 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:07 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:07 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:07 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:07 crc kubenswrapper[4685]: I0128 12:23:07.790473 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
[12:23:07.850957 – 12:23:08.667511: the UnmountVolume/MountVolume retry pairs for pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 continue verbatim roughly every 100 ms, each rescheduled with a 500 ms durationBeforeRetry]
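Each nestedpendingoperations.go:348 entry ends by scheduling the next attempt: "No retries permitted until <now+500ms> (durationBeforeRetry 500ms)". The volume manager backs failed operations off exponentially from that 500 ms starting step; in this excerpt every attempt fails at the same driver-lookup stage and each is rescheduled 500 ms out. Below is a rough model of that retry shape using apimachinery's wait helpers. It is a sketch of the cadence only, not the kubelet's actual code path; the doubling factor and the cap of roughly two minutes are assumptions about the policy, not values read from this log.

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Assumed policy: start at 500ms (matching "durationBeforeRetry 500ms"),
	// double on each failure, cap at ~2 minutes.
	backoff := wait.Backoff{
		Duration: 500 * time.Millisecond,
		Factor:   2.0,
		Steps:    5,
		Cap:      2 * time.Minute,
	}
	attempt := 0
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempt++
		// Stand-in for the failing operation: the CSI driver lookup above.
		fmt.Printf("attempt %d: driver kubevirt.io.hostpath-provisioner still not registered\n", attempt)
		return false, nil // false, nil => not done, retry after the next backoff step
	})
	if errors.Is(err, wait.ErrWaitTimeout) {
		fmt.Println("gave up after all backoff steps:", err)
	}
}
```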
Jan 28 12:23:08 crc kubenswrapper[4685]: I0128 12:23:08.748059 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr"
Jan 28 12:23:08 crc kubenswrapper[4685]: I0128 12:23:08.749391 4685 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-4gdsr container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.39:6443/healthz\": dial tcp 10.217.0.39:6443: connect: connection refused" start-of-body=
Jan 28 12:23:08 crc kubenswrapper[4685]: I0128 12:23:08.749457 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" podUID="4b002972-6a89-4a6a-9839-a69e5e9ff3e5" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.39:6443/healthz\": dial tcp 10.217.0.39:6443: connect: connection refused"
[12:23:08.768934 – 12:23:08.770134: another UnmountVolume/MountVolume retry pair for pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8]
Jan 28 12:23:08 crc kubenswrapper[4685]: I0128 12:23:08.770634 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" podStartSLOduration=105.770616345 podStartE2EDuration="1m45.770616345s" podCreationTimestamp="2026-01-28 12:21:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:08.768547901 +0000 UTC m=+139.855961736" watchObservedRunningTime="2026-01-28 12:23:08.770616345 +0000 UTC m=+139.858030180"
Jan 28 12:23:08 crc kubenswrapper[4685]: I0128 12:23:08.787429 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:08 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:08 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:08 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:08 crc kubenswrapper[4685]: I0128 12:23:08.787954 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:08 crc kubenswrapper[4685]: I0128 12:23:08.793118 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-ftmbf" podStartSLOduration=16.793089848 podStartE2EDuration="16.793089848s" podCreationTimestamp="2026-01-28 12:22:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:08.789940341 +0000 UTC m=+139.877354196" watchObservedRunningTime="2026-01-28 12:23:08.793089848 +0000 UTC m=+139.880503693"
[12:23:08.870619 – 12:23:08.974708: retry pairs for pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 continue verbatim]
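The pod_startup_latency_tracker entries above are internally consistent arithmetic: with firstStartedPulling/lastFinishedPulling at the zero time (no image pull observed), podStartSLOduration reduces to watchObservedRunningTime minus podCreationTimestamp. The short sketch below reproduces the oauth-openshift figure exactly from the two timestamps in the log line; it is illustrative arithmetic only, not the tracker's implementation.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Values copied from the oauth-openshift-558db77b4-4gdsr log entry above.
	created, _ := time.Parse(time.RFC3339Nano, "2026-01-28T12:21:23Z")
	observed, _ := time.Parse(time.RFC3339Nano, "2026-01-28T12:23:08.770616345Z")
	// Prints 1m45.770616345s, matching podStartE2EDuration="1m45.770616345s"
	// and podStartSLOduration=105.770616345 (no pull time to subtract).
	fmt.Println(observed.Sub(created))
}
```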
[12:23:08.974882 – 12:23:09.382138: the UnmountVolume/MountVolume retry pairs for pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 continue verbatim roughly every 100 ms]
Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.423075 4685 csr.go:261] certificate signing request csr-jdsl9 is approved, waiting to be issued
Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.430740 4685 csr.go:257] certificate signing request csr-jdsl9 is issued
[12:23:09.483646 – 12:23:09.585528: retry pairs for pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 continue verbatim]
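The csr.go lines record the node's client-certificate CSR csr-jdsl9 passing through approval and then issuance. In API terms, "approved" is an Approved condition on the CertificateSigningRequest and "issued" means status.certificate is non-empty. A minimal client-go check of both states follows, under the same kubeconfig assumption as the earlier sketch; the CSR name is taken from the log.

```go
package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	certv1 "k8s.io/api/certificates/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", filepath.Join(os.Getenv("HOME"), ".kube", "config"))
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	csr, err := cs.CertificatesV1().CertificateSigningRequests().Get(context.TODO(), "csr-jdsl9", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	// "approved" in the kubelet log corresponds to this condition on the CSR.
	for _, c := range csr.Status.Conditions {
		if c.Type == certv1.CertificateApproved {
			fmt.Println("approved:", c.Reason)
		}
	}
	// "issued" corresponds to a non-empty signed certificate in status.
	fmt.Println("issued:", len(csr.Status.Certificate) > 0)
}
```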
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.686859 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:09 crc kubenswrapper[4685]: E0128 12:23:09.687286 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:10.187268862 +0000 UTC m=+141.274682697 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.767007 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" event={"ID":"efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9","Type":"ContainerStarted","Data":"6fa08fb1cc2c5692676b181b7de7cedd35ce19f53e8289a55f91d00bdb3e75c0"} Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.770610 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"81539868-3464-4bfe-8a42-08c54ea1b0df","Type":"ContainerStarted","Data":"f29b870e9642ebefff8d16c3badcd1d93526337c7c9a18c92cf5073e9ced308e"} Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.772298 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-xcd5g" event={"ID":"a7ad2819-235d-4f6d-b731-6f8e31db0b13","Type":"ContainerStarted","Data":"8d33ffd122a15b6fb5e27c350002b6198dc8022005be20d22ed78b5d75cee8fc"} Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.788733 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:09 crc kubenswrapper[4685]: E0128 12:23:09.788923 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:10.288885847 +0000 UTC m=+141.376299682 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.789179 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:09 crc kubenswrapper[4685]: E0128 12:23:09.789531 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:10.289517736 +0000 UTC m=+141.376931571 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.789979 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 12:23:09 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld Jan 28 12:23:09 crc kubenswrapper[4685]: [+]process-running ok Jan 28 12:23:09 crc kubenswrapper[4685]: healthz check failed Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.790022 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.791945 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-slprg" event={"ID":"ee856066-85d4-41a5-86eb-9ab5e01ca0c6","Type":"ContainerStarted","Data":"789d8a5b13ba18e7fb95596d136910b9e5e75fd6e6a3db6fd95f619cc10df699"} Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.800835 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-lw2hz" event={"ID":"021e1574-af09-49bb-af67-1fc874bb0a06","Type":"ContainerStarted","Data":"9dd7bb9ff08050361b4bea53e8aad00ad4f2cf13d01e0d08e455bbe8568de839"} Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.802991 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-s4474" event={"ID":"b65cc4af-8099-4f34-a93e-9ef9ae97996b","Type":"ContainerStarted","Data":"d598730d88920b881529163aec6c987f98ff49ecf3ffa915472930379710c899"} Jan 28 12:23:09 
Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.804290 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq" event={"ID":"82c02f94-72c8-43fd-b0f0-7c644d696b61","Type":"ContainerStarted","Data":"05aafec88d5f5bf7130c2ba092dbb5340573eb4b2af062580b1553a960c9348d"}
Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.805463 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-922md" event={"ID":"f09051b7-ad38-4a84-89d7-ad84fd51fa1c","Type":"ContainerStarted","Data":"1c80867d8437afe655342f89795114c3c897f3cdd689f008f7e407f1975dd9ff"}
Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.822459 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp" event={"ID":"af98b7ab-bf20-4cb2-85ec-1eb757b8db45","Type":"ContainerStarted","Data":"266dfd58258525f936c507a8376f68cc2e3c921b62877f4a554103cccdd003d0"}
Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.828726 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" event={"ID":"75a32638-e479-4025-abd5-d00533347443","Type":"ContainerStarted","Data":"651e19d23f9e4143b9b049a80e45de9aedd49de06572db7d869c0a1f474312c4"}
Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.829756 4685 generic.go:334] "Generic (PLEG): container finished" podID="384b638e-e264-4492-8250-95540a24916c" containerID="baaf64ffda2228af339dc3dba36961ff66977e738ed6a60de82f094e83c09eab" exitCode=0
Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.829816 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp" event={"ID":"384b638e-e264-4492-8250-95540a24916c","Type":"ContainerDied","Data":"baaf64ffda2228af339dc3dba36961ff66977e738ed6a60de82f094e83c09eab"}
Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.842559 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qfcjm" event={"ID":"0353bfa2-54e9-4f75-ab25-ed65d14a9ab7","Type":"ContainerStarted","Data":"35d54178f7b7c6627b760a439130ca0b34eacb57404a83a27d38d8a27d481c7a"}
Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.890047 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:09 crc kubenswrapper[4685]: E0128 12:23:09.890273 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:10.390238523 +0000 UTC m=+141.477652358 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.890459 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:09 crc kubenswrapper[4685]: E0128 12:23:09.891663 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:10.391655247 +0000 UTC m=+141.479069082 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.991463 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:09 crc kubenswrapper[4685]: E0128 12:23:09.991636 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:10.49161112 +0000 UTC m=+141.579024955 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:09 crc kubenswrapper[4685]: I0128 12:23:09.991722 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:09 crc kubenswrapper[4685]: E0128 12:23:09.992014 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:10.492007143 +0000 UTC m=+141.579420978 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.092601 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:10 crc kubenswrapper[4685]: E0128 12:23:10.092848 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:10.592818652 +0000 UTC m=+141.680232537 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.194534 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:10 crc kubenswrapper[4685]: E0128 12:23:10.194939 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:10.694921842 +0000 UTC m=+141.782335677 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:10 crc kubenswrapper[4685]: E0128 12:23:10.295851 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:10.795822155 +0000 UTC m=+141.883236030 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
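Each failed operation is parked by nestedpendingoperations and may not run again before the logged deadline; the first MountDevice error above sets a deadline exactly 500ms after the failure, matching durationBeforeRetry 500ms. A hedged sketch of that pacing, where the doubling and the two-minute cap are assumptions rather than kubelet's exact constants:

    package main

    import (
    	"fmt"
    	"time"
    )

    // retryDeadline computes when a parked volume operation may run again:
    // an initial 500ms backoff, assumed here to double per consecutive failure
    // up to a cap (illustrative constants).
    func retryDeadline(lastError time.Time, failures int) time.Time {
    	backoff := 500 * time.Millisecond
    	for i := 1; i < failures && backoff < 2*time.Minute; i++ {
    		backoff *= 2
    	}
    	return lastError.Add(backoff)
    }

    func main() {
    	errAt, _ := time.Parse(time.RFC3339Nano, "2026-01-28T12:23:09.687268862Z")
    	// Prints 12:23:10.187, matching "No retries permitted until
    	// 2026-01-28 12:23:10.187268862" in the log above.
    	fmt.Println(retryDeadline(errAt, 1).Format("15:04:05.000"))
    }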
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.295710 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.296135 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:10 crc kubenswrapper[4685]: E0128 12:23:10.296538 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:10.796521966 +0000 UTC m=+141.883935831 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.397716 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:10 crc kubenswrapper[4685]: E0128 12:23:10.397945 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:10.897908144 +0000 UTC m=+141.985321979 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.398064 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:10 crc kubenswrapper[4685]: E0128 12:23:10.398483 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:10.898470121 +0000 UTC m=+141.985883956 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.432355 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-28 12:18:09 +0000 UTC, rotation deadline is 2026-12-01 05:11:43.035345544 +0000 UTC
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.432397 4685 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 7360h48m32.602951086s for next certificate rotation
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.499264 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:10 crc kubenswrapper[4685]: E0128 12:23:10.499576 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:10.999556329 +0000 UTC m=+142.086970164 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
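The certificate_manager entries a few lines up are internally consistent: the rotation deadline 2026-12-01 05:11:43 minus a current time of 12:23:10 on 2026-01-28 is about 7360h48m33s, matching the logged wait of 7360h48m32.6s once the sub-second part of "now" (the kubelet logged at 12:23:10.43) is accounted for. A quick check with the timestamps copied from the log:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	now, _ := time.Parse(time.RFC3339, "2026-01-28T12:23:10Z")
    	deadline, _ := time.Parse(time.RFC3339Nano, "2026-12-01T05:11:43.035345544Z")
    	// Prints 7360h48m33.035345544s; the log says 7360h48m32.6s because the
    	// kubelet's "now" was a fraction of a second later than 12:23:10 sharp.
    	fmt.Println(deadline.Sub(now))
    }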
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.600930 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:10 crc kubenswrapper[4685]: E0128 12:23:10.601376 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:11.10135716 +0000 UTC m=+142.188770995 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.618735 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.619408 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.624636 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.624636 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.628011 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.702492 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:10 crc kubenswrapper[4685]: E0128 12:23:10.702705 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:11.202664125 +0000 UTC m=+142.290077960 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.702793 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ec91605c-e13c-49be-9329-217decae5afd-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"ec91605c-e13c-49be-9329-217decae5afd\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.702898 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.702941 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ec91605c-e13c-49be-9329-217decae5afd-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"ec91605c-e13c-49be-9329-217decae5afd\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 12:23:10 crc kubenswrapper[4685]: E0128 12:23:10.703302 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:11.203291824 +0000 UTC m=+142.290705659 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.786809 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:10 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:10 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:10 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.787159 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.803991 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:10 crc kubenswrapper[4685]: E0128 12:23:10.804188 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:11.304117183 +0000 UTC m=+142.391531018 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
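The router's startup probe output follows the usual Kubernetes healthz convention: one [+] or [-] line per sub-check and an HTTP 500 while any check ([-]backend-http, [-]has-synced) is failing, which is why the kubelet keeps reporting Startup probe failures until the endpoint turns 200. A rough sketch of a handler that emits this shape, with check names and logic invented for illustration (not the router's implementation):

    package main

    import (
    	"fmt"
    	"net/http"
    )

    // healthzHandler aggregates named sub-checks into one endpoint, writing the
    // [+]/[-] lines seen in the probe output and a 500 until every check passes.
    func healthzHandler(checks map[string]func() error) http.HandlerFunc {
    	return func(w http.ResponseWriter, r *http.Request) {
    		body := ""
    		failed := false
    		for name, check := range checks {
    			if err := check(); err != nil {
    				failed = true
    				body += fmt.Sprintf("[-]%s failed: reason withheld\n", name)
    			} else {
    				body += fmt.Sprintf("[+]%s ok\n", name)
    			}
    		}
    		if failed {
    			w.WriteHeader(http.StatusInternalServerError) // probe sees statuscode: 500
    			body += "healthz check failed\n"
    		}
    		fmt.Fprint(w, body)
    	}
    }

    func main() {
    	http.HandleFunc("/healthz", healthzHandler(map[string]func() error{
    		"process-running": func() error { return nil },
    		"backend-http":    func() error { return fmt.Errorf("not ready") },
    	}))
    	http.ListenAndServe(":8080", nil)
    }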
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.804535 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.804597 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ec91605c-e13c-49be-9329-217decae5afd-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"ec91605c-e13c-49be-9329-217decae5afd\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.804655 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ec91605c-e13c-49be-9329-217decae5afd-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"ec91605c-e13c-49be-9329-217decae5afd\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 12:23:10 crc kubenswrapper[4685]: E0128 12:23:10.804959 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:11.304949039 +0000 UTC m=+142.392362874 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.805025 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ec91605c-e13c-49be-9329-217decae5afd-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"ec91605c-e13c-49be-9329-217decae5afd\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.815618 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9q8q2"]
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.817002 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9q8q2"
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.818799 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.842714 4685 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-4gdsr container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.39:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.842820 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" podUID="4b002972-6a89-4a6a-9839-a69e5e9ff3e5" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.39:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.850323 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ec91605c-e13c-49be-9329-217decae5afd-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"ec91605c-e13c-49be-9329-217decae5afd\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.858914 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-kfqrq" event={"ID":"80b3c037-4d1a-4be7-81d6-1c33f1801bd6","Type":"ContainerStarted","Data":"b37edc5dd23485e36607fd1e2f93fb62daaefbcc565969b02909d673711db8fc"}
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.861650 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9q8q2"]
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.902600 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-6rxh9" podStartSLOduration=106.902581911 podStartE2EDuration="1m46.902581911s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:10.898101713 +0000 UTC m=+141.985515548" watchObservedRunningTime="2026-01-28 12:23:10.902581911 +0000 UTC m=+141.989995746"
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.905727 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:10 crc kubenswrapper[4685]: E0128 12:23:10.905858 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:11.405832931 +0000 UTC m=+142.493246766 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
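The pod_startup_latency_tracker entry above checks out: podStartSLOduration=106.902581911 is the gap from podCreationTimestamp 12:21:24 to watchObservedRunningTime 12:23:10.902581911, with both pulling timestamps left at the zero value because no image pull was observed. Verifying the arithmetic:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	created, _ := time.Parse(time.RFC3339, "2026-01-28T12:21:24Z")
    	observed, _ := time.Parse(time.RFC3339Nano, "2026-01-28T12:23:10.902581911Z")
    	// Prints 1m46.902581911s, i.e. 106.902581911s, as logged.
    	fmt.Println(observed.Sub(created))
    }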
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.905937 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.905989 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mqdw\" (UniqueName: \"kubernetes.io/projected/8d93a170-5ad3-489b-b3be-7e3cc4201970-kube-api-access-8mqdw\") pod \"certified-operators-9q8q2\" (UID: \"8d93a170-5ad3-489b-b3be-7e3cc4201970\") " pod="openshift-marketplace/certified-operators-9q8q2"
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.906025 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d93a170-5ad3-489b-b3be-7e3cc4201970-utilities\") pod \"certified-operators-9q8q2\" (UID: \"8d93a170-5ad3-489b-b3be-7e3cc4201970\") " pod="openshift-marketplace/certified-operators-9q8q2"
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.906077 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d93a170-5ad3-489b-b3be-7e3cc4201970-catalog-content\") pod \"certified-operators-9q8q2\" (UID: \"8d93a170-5ad3-489b-b3be-7e3cc4201970\") " pod="openshift-marketplace/certified-operators-9q8q2"
Jan 28 12:23:10 crc kubenswrapper[4685]: E0128 12:23:10.906851 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:11.406820802 +0000 UTC m=+142.494234717 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:10 crc kubenswrapper[4685]: I0128 12:23:10.937654 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.006947 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.007137 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mqdw\" (UniqueName: \"kubernetes.io/projected/8d93a170-5ad3-489b-b3be-7e3cc4201970-kube-api-access-8mqdw\") pod \"certified-operators-9q8q2\" (UID: \"8d93a170-5ad3-489b-b3be-7e3cc4201970\") " pod="openshift-marketplace/certified-operators-9q8q2"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.007187 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d93a170-5ad3-489b-b3be-7e3cc4201970-utilities\") pod \"certified-operators-9q8q2\" (UID: \"8d93a170-5ad3-489b-b3be-7e3cc4201970\") " pod="openshift-marketplace/certified-operators-9q8q2"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.007232 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d93a170-5ad3-489b-b3be-7e3cc4201970-catalog-content\") pod \"certified-operators-9q8q2\" (UID: \"8d93a170-5ad3-489b-b3be-7e3cc4201970\") " pod="openshift-marketplace/certified-operators-9q8q2"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.007979 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d93a170-5ad3-489b-b3be-7e3cc4201970-catalog-content\") pod \"certified-operators-9q8q2\" (UID: \"8d93a170-5ad3-489b-b3be-7e3cc4201970\") " pod="openshift-marketplace/certified-operators-9q8q2"
Jan 28 12:23:11 crc kubenswrapper[4685]: E0128 12:23:11.008399 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:11.508381345 +0000 UTC m=+142.595795180 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.008870 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d93a170-5ad3-489b-b3be-7e3cc4201970-utilities\") pod \"certified-operators-9q8q2\" (UID: \"8d93a170-5ad3-489b-b3be-7e3cc4201970\") " pod="openshift-marketplace/certified-operators-9q8q2"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.018945 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-924cb"]
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.019912 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-924cb"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.023638 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.044318 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mqdw\" (UniqueName: \"kubernetes.io/projected/8d93a170-5ad3-489b-b3be-7e3cc4201970-kube-api-access-8mqdw\") pod \"certified-operators-9q8q2\" (UID: \"8d93a170-5ad3-489b-b3be-7e3cc4201970\") " pod="openshift-marketplace/certified-operators-9q8q2"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.045475 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-924cb"]
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.122805 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb-catalog-content\") pod \"community-operators-924cb\" (UID: \"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb\") " pod="openshift-marketplace/community-operators-924cb"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.122852 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.122890 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8m5z\" (UniqueName: \"kubernetes.io/projected/cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb-kube-api-access-f8m5z\") pod \"community-operators-924cb\" (UID: \"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb\") " pod="openshift-marketplace/community-operators-924cb"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.122917 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb-utilities\") pod \"community-operators-924cb\" (UID: \"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb\") " pod="openshift-marketplace/community-operators-924cb"
Jan 28 12:23:11 crc kubenswrapper[4685]: E0128 12:23:11.123284 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:11.623266069 +0000 UTC m=+142.710679904 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.142638 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9q8q2"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.217317 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ntg69"]
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.218263 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ntg69"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.225736 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:11 crc kubenswrapper[4685]: E0128 12:23:11.226140 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:11.72607072 +0000 UTC m=+142.813484555 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.226384 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb-catalog-content\") pod \"community-operators-924cb\" (UID: \"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb\") " pod="openshift-marketplace/community-operators-924cb"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.226436 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.226485 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8m5z\" (UniqueName: \"kubernetes.io/projected/cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb-kube-api-access-f8m5z\") pod \"community-operators-924cb\" (UID: \"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb\") " pod="openshift-marketplace/community-operators-924cb"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.226519 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb-utilities\") pod \"community-operators-924cb\" (UID: \"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb\") " pod="openshift-marketplace/community-operators-924cb"
Jan 28 12:23:11 crc kubenswrapper[4685]: E0128 12:23:11.226910 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:11.726898645 +0000 UTC m=+142.814312480 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.227208 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb-catalog-content\") pod \"community-operators-924cb\" (UID: \"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb\") " pod="openshift-marketplace/community-operators-924cb"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.227238 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb-utilities\") pod \"community-operators-924cb\" (UID: \"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb\") " pod="openshift-marketplace/community-operators-924cb"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.240736 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ntg69"]
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.261148 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8m5z\" (UniqueName: \"kubernetes.io/projected/cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb-kube-api-access-f8m5z\") pod \"community-operators-924cb\" (UID: \"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb\") " pod="openshift-marketplace/community-operators-924cb"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.340889 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.341183 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d2d54a9-9f8c-4158-8551-c4351bca8c19-utilities\") pod \"certified-operators-ntg69\" (UID: \"7d2d54a9-9f8c-4158-8551-c4351bca8c19\") " pod="openshift-marketplace/certified-operators-ntg69"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.341229 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glfkg\" (UniqueName: \"kubernetes.io/projected/7d2d54a9-9f8c-4158-8551-c4351bca8c19-kube-api-access-glfkg\") pod \"certified-operators-ntg69\" (UID: \"7d2d54a9-9f8c-4158-8551-c4351bca8c19\") " pod="openshift-marketplace/certified-operators-ntg69"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.341272 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d2d54a9-9f8c-4158-8551-c4351bca8c19-catalog-content\") pod \"certified-operators-ntg69\" (UID: \"7d2d54a9-9f8c-4158-8551-c4351bca8c19\") " pod="openshift-marketplace/certified-operators-ntg69"
Jan 28 12:23:11 crc kubenswrapper[4685]: E0128 12:23:11.341405 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:11.841386087 +0000 UTC m=+142.928799922 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.367335 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-924cb"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.388081 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.413984 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-gvfhc"]
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.415069 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gvfhc"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.428752 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gvfhc"]
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.442807 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfjmg\" (UniqueName: \"kubernetes.io/projected/94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2-kube-api-access-gfjmg\") pod \"community-operators-gvfhc\" (UID: \"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2\") " pod="openshift-marketplace/community-operators-gvfhc"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.442847 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d2d54a9-9f8c-4158-8551-c4351bca8c19-utilities\") pod \"certified-operators-ntg69\" (UID: \"7d2d54a9-9f8c-4158-8551-c4351bca8c19\") " pod="openshift-marketplace/certified-operators-ntg69"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.442867 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2-catalog-content\") pod \"community-operators-gvfhc\" (UID: \"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2\") " pod="openshift-marketplace/community-operators-gvfhc"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.442900 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glfkg\" (UniqueName: \"kubernetes.io/projected/7d2d54a9-9f8c-4158-8551-c4351bca8c19-kube-api-access-glfkg\") pod \"certified-operators-ntg69\" (UID: \"7d2d54a9-9f8c-4158-8551-c4351bca8c19\") " pod="openshift-marketplace/certified-operators-ntg69"
Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.442923 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.442941 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2-utilities\") pod \"community-operators-gvfhc\" (UID: \"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2\") " pod="openshift-marketplace/community-operators-gvfhc" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.442968 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d2d54a9-9f8c-4158-8551-c4351bca8c19-catalog-content\") pod \"certified-operators-ntg69\" (UID: \"7d2d54a9-9f8c-4158-8551-c4351bca8c19\") " pod="openshift-marketplace/certified-operators-ntg69" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.443412 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d2d54a9-9f8c-4158-8551-c4351bca8c19-utilities\") pod \"certified-operators-ntg69\" (UID: \"7d2d54a9-9f8c-4158-8551-c4351bca8c19\") " pod="openshift-marketplace/certified-operators-ntg69" Jan 28 12:23:11 crc kubenswrapper[4685]: E0128 12:23:11.443544 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:11.943530388 +0000 UTC m=+143.030944223 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.464669 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glfkg\" (UniqueName: \"kubernetes.io/projected/7d2d54a9-9f8c-4158-8551-c4351bca8c19-kube-api-access-glfkg\") pod \"certified-operators-ntg69\" (UID: \"7d2d54a9-9f8c-4158-8551-c4351bca8c19\") " pod="openshift-marketplace/certified-operators-ntg69" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.474200 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9q8q2"] Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.543806 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:11 crc kubenswrapper[4685]: E0128 12:23:11.543957 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-28 12:23:12.043929595 +0000 UTC m=+143.131343430 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.544024 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.544054 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2-utilities\") pod \"community-operators-gvfhc\" (UID: \"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2\") " pod="openshift-marketplace/community-operators-gvfhc" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.544148 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfjmg\" (UniqueName: \"kubernetes.io/projected/94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2-kube-api-access-gfjmg\") pod \"community-operators-gvfhc\" (UID: \"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2\") " pod="openshift-marketplace/community-operators-gvfhc" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.544187 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2-catalog-content\") pod \"community-operators-gvfhc\" (UID: \"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2\") " pod="openshift-marketplace/community-operators-gvfhc" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.544710 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2-catalog-content\") pod \"community-operators-gvfhc\" (UID: \"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2\") " pod="openshift-marketplace/community-operators-gvfhc" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.544713 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2-utilities\") pod \"community-operators-gvfhc\" (UID: \"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2\") " pod="openshift-marketplace/community-operators-gvfhc" Jan 28 12:23:11 crc kubenswrapper[4685]: E0128 12:23:11.544945 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:12.044929566 +0000 UTC m=+143.132343401 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.558222 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gfjmg\" (UniqueName: \"kubernetes.io/projected/94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2-kube-api-access-gfjmg\") pod \"community-operators-gvfhc\" (UID: \"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2\") " pod="openshift-marketplace/community-operators-gvfhc" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.581394 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d2d54a9-9f8c-4158-8551-c4351bca8c19-catalog-content\") pod \"certified-operators-ntg69\" (UID: \"7d2d54a9-9f8c-4158-8551-c4351bca8c19\") " pod="openshift-marketplace/certified-operators-ntg69" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.603824 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.646580 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/384b638e-e264-4492-8250-95540a24916c-secret-volume\") pod \"384b638e-e264-4492-8250-95540a24916c\" (UID: \"384b638e-e264-4492-8250-95540a24916c\") " Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.647045 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbjdb\" (UniqueName: \"kubernetes.io/projected/384b638e-e264-4492-8250-95540a24916c-kube-api-access-bbjdb\") pod \"384b638e-e264-4492-8250-95540a24916c\" (UID: \"384b638e-e264-4492-8250-95540a24916c\") " Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.647072 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/384b638e-e264-4492-8250-95540a24916c-config-volume\") pod \"384b638e-e264-4492-8250-95540a24916c\" (UID: \"384b638e-e264-4492-8250-95540a24916c\") " Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.647355 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:11 crc kubenswrapper[4685]: E0128 12:23:11.647753 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:12.147734037 +0000 UTC m=+143.235147872 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.653195 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/384b638e-e264-4492-8250-95540a24916c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "384b638e-e264-4492-8250-95540a24916c" (UID: "384b638e-e264-4492-8250-95540a24916c"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.654698 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/384b638e-e264-4492-8250-95540a24916c-config-volume" (OuterVolumeSpecName: "config-volume") pod "384b638e-e264-4492-8250-95540a24916c" (UID: "384b638e-e264-4492-8250-95540a24916c"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.657146 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/384b638e-e264-4492-8250-95540a24916c-kube-api-access-bbjdb" (OuterVolumeSpecName: "kube-api-access-bbjdb") pod "384b638e-e264-4492-8250-95540a24916c" (UID: "384b638e-e264-4492-8250-95540a24916c"). InnerVolumeSpecName "kube-api-access-bbjdb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.735035 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gvfhc" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.749281 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.749345 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbjdb\" (UniqueName: \"kubernetes.io/projected/384b638e-e264-4492-8250-95540a24916c-kube-api-access-bbjdb\") on node \"crc\" DevicePath \"\"" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.749357 4685 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/384b638e-e264-4492-8250-95540a24916c-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.749366 4685 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/384b638e-e264-4492-8250-95540a24916c-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 12:23:11 crc kubenswrapper[4685]: E0128 12:23:11.749731 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-28 12:23:12.249709613 +0000 UTC m=+143.337123448 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.790343 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 12:23:11 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld Jan 28 12:23:11 crc kubenswrapper[4685]: [+]process-running ok Jan 28 12:23:11 crc kubenswrapper[4685]: healthz check failed Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.790402 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.843236 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ntg69" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.851911 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:11 crc kubenswrapper[4685]: E0128 12:23:11.852279 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:12.352250336 +0000 UTC m=+143.439664171 (durationBeforeRetry 500ms). 
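[Annotation] The router-default startup probe above fails with HTTP 500 and a body that lists per-check results ([-]backend-http, [-]has-synced, [+]process-running). Below is a small sketch, under assumptions, of how such an aggregated healthz endpoint and a probe-style GET behave together; the handler and check names here only mirror the log output, they are not the router's implementation.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// healthz aggregates named checks the way the probe output above suggests:
// each check prints a [+]/[-] line, and any failure turns the whole
// response into HTTP 500 ("healthz check failed").
func healthz(order []string, checks map[string]error) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		body, failed := "", false
		for _, name := range order {
			if err := checks[name]; err != nil {
				failed = true
				body += "[-]" + name + " failed: reason withheld\n"
			} else {
				body += "[+]" + name + " ok\n"
			}
		}
		if failed {
			w.WriteHeader(http.StatusInternalServerError)
			body += "healthz check failed\n"
		}
		io.WriteString(w, body)
	}
}

func main() {
	srv := httptest.NewServer(healthz(
		[]string{"backend-http", "has-synced", "process-running"},
		map[string]error{
			"backend-http":    fmt.Errorf("not ready"),
			"has-synced":      fmt.Errorf("not ready"),
			"process-running": nil,
		}))
	defer srv.Close()

	// A startup probe is essentially this: GET the endpoint, non-2xx => failure.
	resp, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Printf("HTTP probe failed with statuscode: %d\n%s", resp.StatusCode, out)
}
```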
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.869051 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9q8q2" event={"ID":"8d93a170-5ad3-489b-b3be-7e3cc4201970","Type":"ContainerStarted","Data":"55f3b0c9d15a20d69ddfb8460e50ace0d6be50d7231959400186c86ffd4af39b"} Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.881162 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"ec91605c-e13c-49be-9329-217decae5afd","Type":"ContainerStarted","Data":"127f4c1a8a1f1ed8f332b9fd52d8f5ba12fd7321de28c19fb48b0f1695f52795"} Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.911933 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.912000 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp" event={"ID":"384b638e-e264-4492-8250-95540a24916c","Type":"ContainerDied","Data":"1e155aa5f84aa085b7069a6a9c290dcfcfe4c536e3f7d78b7ddf6ef67e814bd3"} Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.912048 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1e155aa5f84aa085b7069a6a9c290dcfcfe4c536e3f7d78b7ddf6ef67e814bd3" Jan 28 12:23:11 crc kubenswrapper[4685]: I0128 12:23:11.956022 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:11 crc kubenswrapper[4685]: E0128 12:23:11.958423 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:12.458409451 +0000 UTC m=+143.545823286 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.058823 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:12 crc kubenswrapper[4685]: E0128 12:23:12.059162 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:12.559132248 +0000 UTC m=+143.646546083 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.059543 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:12 crc kubenswrapper[4685]: E0128 12:23:12.059968 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:12.559960164 +0000 UTC m=+143.647373999 (durationBeforeRetry 500ms). 
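[Annotation] Note the cadence: the reconciler re-attempts the same volume operation on every sweep (the entries land roughly every 100ms), but nestedpendingoperations parks a failed operation with "No retries permitted until <t> (durationBeforeRetry 500ms)", so an actual retry and a fresh error only appear about twice per second. A minimal sketch of that gate, assuming a fixed 500ms backoff rather than the kubelet's real backoff policy:

```go
package main

import (
	"fmt"
	"time"
)

// retryGate mimics the "No retries permitted until ..." behaviour in this
// log: after a failure the operation is frozen for durationBeforeRetry,
// even though the reconciler keeps asking every sweep.
type retryGate struct {
	notBefore time.Time
	backoff   time.Duration
}

func (g *retryGate) tryRun(op func() error, now time.Time) {
	if now.Before(g.notBefore) {
		return // parked; the reconciler will ask again next sweep
	}
	if err := op(); err != nil {
		g.notBefore = now.Add(g.backoff)
		fmt.Printf("failed. No retries permitted until %s (durationBeforeRetry %s). Error: %v\n",
			g.notBefore.Format(time.RFC3339Nano), g.backoff, err)
	}
}

func main() {
	g := &retryGate{backoff: 500 * time.Millisecond}
	mount := func() error { return fmt.Errorf("driver not registered") }
	// Simulate reconciler sweeps every 100ms for one second: only two
	// sweeps per second actually reach the failing operation.
	start := time.Now()
	for t := start; t.Before(start.Add(time.Second)); t = t.Add(100 * time.Millisecond) {
		g.tryRun(mount, t)
	}
}
```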
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.085920 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xkxzp" podStartSLOduration=108.085899694 podStartE2EDuration="1m48.085899694s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:11.936215436 +0000 UTC m=+143.023629271" watchObservedRunningTime="2026-01-28 12:23:12.085899694 +0000 UTC m=+143.173313529" Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.087727 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gvfhc"] Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.160062 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:12 crc kubenswrapper[4685]: E0128 12:23:12.160266 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:12.660221017 +0000 UTC m=+143.747634852 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.160400 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:12 crc kubenswrapper[4685]: E0128 12:23:12.160831 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:12.660822705 +0000 UTC m=+143.748236530 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.196428 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-924cb"] Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.204862 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ntg69"] Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.262705 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:12 crc kubenswrapper[4685]: E0128 12:23:12.263600 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:12.763567385 +0000 UTC m=+143.850981220 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.364756 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:12 crc kubenswrapper[4685]: E0128 12:23:12.365432 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:12.865397566 +0000 UTC m=+143.952811451 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.466057 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:12 crc kubenswrapper[4685]: E0128 12:23:12.466371 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:12.966307889 +0000 UTC m=+144.053721734 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.466580 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:12 crc kubenswrapper[4685]: E0128 12:23:12.467007 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:12.96699719 +0000 UTC m=+144.054411025 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.567995 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:12 crc kubenswrapper[4685]: E0128 12:23:12.568522 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:13.068503991 +0000 UTC m=+144.155917836 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.669535 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:12 crc kubenswrapper[4685]: E0128 12:23:12.669885 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:13.169872218 +0000 UTC m=+144.257286053 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.770980 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:12 crc kubenswrapper[4685]: E0128 12:23:12.771228 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:13.271188534 +0000 UTC m=+144.358602369 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.771529 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:12 crc kubenswrapper[4685]: E0128 12:23:12.771934 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:13.271924866 +0000 UTC m=+144.359338701 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.787324 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 12:23:12 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld Jan 28 12:23:12 crc kubenswrapper[4685]: [+]process-running ok Jan 28 12:23:12 crc kubenswrapper[4685]: healthz check failed Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.787419 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.807678 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8cf72"] Jan 28 12:23:12 crc kubenswrapper[4685]: E0128 12:23:12.807927 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="384b638e-e264-4492-8250-95540a24916c" containerName="collect-profiles" Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.807946 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="384b638e-e264-4492-8250-95540a24916c" containerName="collect-profiles" Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.808068 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="384b638e-e264-4492-8250-95540a24916c" containerName="collect-profiles" Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.808858 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8cf72" Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.811438 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.822481 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8cf72"] Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.872888 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.873138 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/424bdf45-4dcb-4b13-b68f-e55e115238bb-utilities\") pod \"redhat-marketplace-8cf72\" (UID: \"424bdf45-4dcb-4b13-b68f-e55e115238bb\") " pod="openshift-marketplace/redhat-marketplace-8cf72" Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.873225 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rdrn\" (UniqueName: \"kubernetes.io/projected/424bdf45-4dcb-4b13-b68f-e55e115238bb-kube-api-access-2rdrn\") pod \"redhat-marketplace-8cf72\" (UID: \"424bdf45-4dcb-4b13-b68f-e55e115238bb\") " pod="openshift-marketplace/redhat-marketplace-8cf72" Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.873243 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/424bdf45-4dcb-4b13-b68f-e55e115238bb-catalog-content\") pod \"redhat-marketplace-8cf72\" (UID: \"424bdf45-4dcb-4b13-b68f-e55e115238bb\") " pod="openshift-marketplace/redhat-marketplace-8cf72" Jan 28 12:23:12 crc kubenswrapper[4685]: E0128 12:23:12.873407 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:13.373390946 +0000 UTC m=+144.460804781 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.918954 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-924cb" event={"ID":"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb","Type":"ContainerStarted","Data":"918f0e08f6766c49518b045eb18a6059f0d184d963cb3daa1adfd03ecfe865ea"} Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.921189 4685 generic.go:334] "Generic (PLEG): container finished" podID="81539868-3464-4bfe-8a42-08c54ea1b0df" containerID="f29b870e9642ebefff8d16c3badcd1d93526337c7c9a18c92cf5073e9ced308e" exitCode=0 Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.921235 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"81539868-3464-4bfe-8a42-08c54ea1b0df","Type":"ContainerDied","Data":"f29b870e9642ebefff8d16c3badcd1d93526337c7c9a18c92cf5073e9ced308e"} Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.922565 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gvfhc" event={"ID":"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2","Type":"ContainerStarted","Data":"7fda746dcc79e22591455ebfc8032651b028b8c47fcaca3e47f5208e0a044310"} Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.924217 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntg69" event={"ID":"7d2d54a9-9f8c-4158-8551-c4351bca8c19","Type":"ContainerStarted","Data":"db225b671fe985f01ec3bc0725d86c5028aa2ab2b56f6012c6a2ac67b28f6c7e"} Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.926464 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.926488 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-slprg" Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.926498 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-922md" Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.927766 4685 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-dwzsq container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.927803 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" podUID="75a32638-e479-4025-abd5-d00533347443" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.929855 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-dns/dns-default-slprg" Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.974717 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.974905 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rdrn\" (UniqueName: \"kubernetes.io/projected/424bdf45-4dcb-4b13-b68f-e55e115238bb-kube-api-access-2rdrn\") pod \"redhat-marketplace-8cf72\" (UID: \"424bdf45-4dcb-4b13-b68f-e55e115238bb\") " pod="openshift-marketplace/redhat-marketplace-8cf72" Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.975012 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/424bdf45-4dcb-4b13-b68f-e55e115238bb-catalog-content\") pod \"redhat-marketplace-8cf72\" (UID: \"424bdf45-4dcb-4b13-b68f-e55e115238bb\") " pod="openshift-marketplace/redhat-marketplace-8cf72" Jan 28 12:23:12 crc kubenswrapper[4685]: E0128 12:23:12.975124 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:13.475102504 +0000 UTC m=+144.562516339 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.975625 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/424bdf45-4dcb-4b13-b68f-e55e115238bb-utilities\") pod \"redhat-marketplace-8cf72\" (UID: \"424bdf45-4dcb-4b13-b68f-e55e115238bb\") " pod="openshift-marketplace/redhat-marketplace-8cf72" Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.976688 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/424bdf45-4dcb-4b13-b68f-e55e115238bb-catalog-content\") pod \"redhat-marketplace-8cf72\" (UID: \"424bdf45-4dcb-4b13-b68f-e55e115238bb\") " pod="openshift-marketplace/redhat-marketplace-8cf72" Jan 28 12:23:12 crc kubenswrapper[4685]: I0128 12:23:12.979783 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/424bdf45-4dcb-4b13-b68f-e55e115238bb-utilities\") pod \"redhat-marketplace-8cf72\" (UID: \"424bdf45-4dcb-4b13-b68f-e55e115238bb\") " pod="openshift-marketplace/redhat-marketplace-8cf72" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.022018 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rdrn\" (UniqueName: \"kubernetes.io/projected/424bdf45-4dcb-4b13-b68f-e55e115238bb-kube-api-access-2rdrn\") pod \"redhat-marketplace-8cf72\" 
(UID: \"424bdf45-4dcb-4b13-b68f-e55e115238bb\") " pod="openshift-marketplace/redhat-marketplace-8cf72" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.038824 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-922md" podStartSLOduration=109.038805739 podStartE2EDuration="1m49.038805739s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:13.02066522 +0000 UTC m=+144.108079055" watchObservedRunningTime="2026-01-28 12:23:13.038805739 +0000 UTC m=+144.126219574" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.077103 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:13 crc kubenswrapper[4685]: E0128 12:23:13.077367 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:13.577310567 +0000 UTC m=+144.664724402 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.077852 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:13 crc kubenswrapper[4685]: E0128 12:23:13.078223 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:13.578215205 +0000 UTC m=+144.665629040 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.097032 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-xcd5g" podStartSLOduration=109.096961823 podStartE2EDuration="1m49.096961823s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:13.094569929 +0000 UTC m=+144.181983764" watchObservedRunningTime="2026-01-28 12:23:13.096961823 +0000 UTC m=+144.184375658" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.127094 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8cf72" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.149757 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" podStartSLOduration=109.149744141 podStartE2EDuration="1m49.149744141s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:13.140057763 +0000 UTC m=+144.227471598" watchObservedRunningTime="2026-01-28 12:23:13.149744141 +0000 UTC m=+144.237157976" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.150062 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" podStartSLOduration=110.150057261 podStartE2EDuration="1m50.150057261s" podCreationTimestamp="2026-01-28 12:21:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:13.122996986 +0000 UTC m=+144.210410831" watchObservedRunningTime="2026-01-28 12:23:13.150057261 +0000 UTC m=+144.237471096" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.171097 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kvphq" podStartSLOduration=109.171076809 podStartE2EDuration="1m49.171076809s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:13.170653606 +0000 UTC m=+144.258067441" watchObservedRunningTime="2026-01-28 12:23:13.171076809 +0000 UTC m=+144.258490644" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.176384 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qfcjm" podStartSLOduration=110.176356972 podStartE2EDuration="1m50.176356972s" podCreationTimestamp="2026-01-28 12:21:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:13.153113845 +0000 UTC m=+144.240527680" 
watchObservedRunningTime="2026-01-28 12:23:13.176356972 +0000 UTC m=+144.263770807" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.179130 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:13 crc kubenswrapper[4685]: E0128 12:23:13.179323 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:13.679274362 +0000 UTC m=+144.766688197 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.179663 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:13 crc kubenswrapper[4685]: E0128 12:23:13.180208 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:13.680200871 +0000 UTC m=+144.767614696 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.189442 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-slprg" podStartSLOduration=21.189423715 podStartE2EDuration="21.189423715s" podCreationTimestamp="2026-01-28 12:22:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:13.186522236 +0000 UTC m=+144.273936071" watchObservedRunningTime="2026-01-28 12:23:13.189423715 +0000 UTC m=+144.276837550" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.208337 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-smscz"] Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.209347 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-smscz" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.223541 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-smscz"] Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.281448 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:13 crc kubenswrapper[4685]: E0128 12:23:13.281591 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:13.781565618 +0000 UTC m=+144.868979453 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.281773 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4bd8edd-83cd-4485-b7ad-b13d5aa53a01-utilities\") pod \"redhat-marketplace-smscz\" (UID: \"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01\") " pod="openshift-marketplace/redhat-marketplace-smscz" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.281829 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.281870 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zq6fj\" (UniqueName: \"kubernetes.io/projected/e4bd8edd-83cd-4485-b7ad-b13d5aa53a01-kube-api-access-zq6fj\") pod \"redhat-marketplace-smscz\" (UID: \"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01\") " pod="openshift-marketplace/redhat-marketplace-smscz" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.281911 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4bd8edd-83cd-4485-b7ad-b13d5aa53a01-catalog-content\") pod \"redhat-marketplace-smscz\" (UID: \"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01\") " pod="openshift-marketplace/redhat-marketplace-smscz" Jan 28 12:23:13 crc kubenswrapper[4685]: E0128 12:23:13.282492 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:13.782469116 +0000 UTC m=+144.869882951 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.382733 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:13 crc kubenswrapper[4685]: E0128 12:23:13.382928 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:13.882900174 +0000 UTC m=+144.970314009 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.383134 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4bd8edd-83cd-4485-b7ad-b13d5aa53a01-utilities\") pod \"redhat-marketplace-smscz\" (UID: \"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01\") " pod="openshift-marketplace/redhat-marketplace-smscz" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.383208 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.383258 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zq6fj\" (UniqueName: \"kubernetes.io/projected/e4bd8edd-83cd-4485-b7ad-b13d5aa53a01-kube-api-access-zq6fj\") pod \"redhat-marketplace-smscz\" (UID: \"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01\") " pod="openshift-marketplace/redhat-marketplace-smscz" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.383316 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4bd8edd-83cd-4485-b7ad-b13d5aa53a01-catalog-content\") pod \"redhat-marketplace-smscz\" (UID: \"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01\") " pod="openshift-marketplace/redhat-marketplace-smscz" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.383638 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/e4bd8edd-83cd-4485-b7ad-b13d5aa53a01-utilities\") pod \"redhat-marketplace-smscz\" (UID: \"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01\") " pod="openshift-marketplace/redhat-marketplace-smscz" Jan 28 12:23:13 crc kubenswrapper[4685]: E0128 12:23:13.383679 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:13.883662547 +0000 UTC m=+144.971076382 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.383712 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4bd8edd-83cd-4485-b7ad-b13d5aa53a01-catalog-content\") pod \"redhat-marketplace-smscz\" (UID: \"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01\") " pod="openshift-marketplace/redhat-marketplace-smscz" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.407029 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zq6fj\" (UniqueName: \"kubernetes.io/projected/e4bd8edd-83cd-4485-b7ad-b13d5aa53a01-kube-api-access-zq6fj\") pod \"redhat-marketplace-smscz\" (UID: \"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01\") " pod="openshift-marketplace/redhat-marketplace-smscz" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.484070 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:13 crc kubenswrapper[4685]: E0128 12:23:13.484336 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:13.984296872 +0000 UTC m=+145.071710707 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.484475 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:13 crc kubenswrapper[4685]: E0128 12:23:13.484880 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:13.984871419 +0000 UTC m=+145.072285254 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.503294 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8cf72"] Jan 28 12:23:13 crc kubenswrapper[4685]: W0128 12:23:13.510917 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod424bdf45_4dcb_4b13_b68f_e55e115238bb.slice/crio-0d50f9d6fe807b9be40093668486b29f1b3403426ea9ba079cd10c933db35e27 WatchSource:0}: Error finding container 0d50f9d6fe807b9be40093668486b29f1b3403426ea9ba079cd10c933db35e27: Status 404 returned error can't find the container with id 0d50f9d6fe807b9be40093668486b29f1b3403426ea9ba079cd10c933db35e27 Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.532833 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-smscz" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.586004 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:13 crc kubenswrapper[4685]: E0128 12:23:13.586412 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:14.086394921 +0000 UTC m=+145.173808756 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.688920 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:13 crc kubenswrapper[4685]: E0128 12:23:13.689562 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:14.189534703 +0000 UTC m=+145.276948728 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.754074 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-smscz"] Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.788628 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 12:23:13 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld Jan 28 12:23:13 crc kubenswrapper[4685]: [+]process-running ok Jan 28 12:23:13 crc kubenswrapper[4685]: healthz check failed Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.788746 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.791464 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:13 crc kubenswrapper[4685]: E0128 12:23:13.792080 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:14.292048215 +0000 UTC m=+145.379462050 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.792322 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:13 crc kubenswrapper[4685]: E0128 12:23:13.792900 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:14.29285115 +0000 UTC m=+145.380265005 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.893747 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:13 crc kubenswrapper[4685]: E0128 12:23:13.894028 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:14.39398179 +0000 UTC m=+145.481395635 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.894344 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:13 crc kubenswrapper[4685]: E0128 12:23:13.894839 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:14.394826776 +0000 UTC m=+145.482240631 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.934428 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" event={"ID":"b22a7729-d10a-412c-8df1-30992ba607b0","Type":"ContainerStarted","Data":"9c5ec2059f0045cb75f73071148c61f086bc170c821b4409b5d23bf9584e996d"} Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.936359 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-smscz" event={"ID":"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01","Type":"ContainerStarted","Data":"24a2eb8eed1cf1eddb2a5326a24d4479c5464665a24cb09cf22f47358b7f2f75"} Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.938331 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8cf72" event={"ID":"424bdf45-4dcb-4b13-b68f-e55e115238bb","Type":"ContainerStarted","Data":"0d50f9d6fe807b9be40093668486b29f1b3403426ea9ba079cd10c933db35e27"} Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.939336 4685 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-dwzsq container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.939402 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" podUID="75a32638-e479-4025-abd5-d00533347443" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.956256 4685 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-lw2hz" podStartSLOduration=109.95622957 podStartE2EDuration="1m49.95622957s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:13.955562129 +0000 UTC m=+145.042976004" watchObservedRunningTime="2026-01-28 12:23:13.95622957 +0000 UTC m=+145.043643435" Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.996926 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:13 crc kubenswrapper[4685]: E0128 12:23:13.997258 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:14.497212614 +0000 UTC m=+145.584626479 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:13 crc kubenswrapper[4685]: I0128 12:23:13.999251 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:14 crc kubenswrapper[4685]: E0128 12:23:13.999707 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:14.49968751 +0000 UTC m=+145.587101335 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.017281 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-kfqrq" podStartSLOduration=110.017256142 podStartE2EDuration="1m50.017256142s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:14.014300911 +0000 UTC m=+145.101714776" watchObservedRunningTime="2026-01-28 12:23:14.017256142 +0000 UTC m=+145.104669977" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.100209 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:14 crc kubenswrapper[4685]: E0128 12:23:14.100327 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:14.600306664 +0000 UTC m=+145.687720499 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.100653 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:14 crc kubenswrapper[4685]: E0128 12:23:14.100957 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:14.600949924 +0000 UTC m=+145.688363759 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.202826 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:14 crc kubenswrapper[4685]: E0128 12:23:14.203130 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:14.703085155 +0000 UTC m=+145.790498990 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.203321 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:14 crc kubenswrapper[4685]: E0128 12:23:14.203757 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:14.703748275 +0000 UTC m=+145.791162110 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.210519 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wl9lt"] Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.212053 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wl9lt" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.218020 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.247242 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wl9lt"] Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.304436 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:14 crc kubenswrapper[4685]: E0128 12:23:14.304748 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:14.804728419 +0000 UTC m=+145.892142254 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.305255 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bjln\" (UniqueName: \"kubernetes.io/projected/e68fe67b-133d-4474-8f6d-a781bca954d7-kube-api-access-6bjln\") pod \"redhat-operators-wl9lt\" (UID: \"e68fe67b-133d-4474-8f6d-a781bca954d7\") " pod="openshift-marketplace/redhat-operators-wl9lt" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.305448 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e68fe67b-133d-4474-8f6d-a781bca954d7-utilities\") pod \"redhat-operators-wl9lt\" (UID: \"e68fe67b-133d-4474-8f6d-a781bca954d7\") " pod="openshift-marketplace/redhat-operators-wl9lt" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.305568 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e68fe67b-133d-4474-8f6d-a781bca954d7-catalog-content\") pod \"redhat-operators-wl9lt\" (UID: \"e68fe67b-133d-4474-8f6d-a781bca954d7\") " pod="openshift-marketplace/redhat-operators-wl9lt" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.305664 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:14 crc kubenswrapper[4685]: E0128 12:23:14.306065 4685 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:14.80604507 +0000 UTC m=+145.893458895 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.407162 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.407484 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e68fe67b-133d-4474-8f6d-a781bca954d7-utilities\") pod \"redhat-operators-wl9lt\" (UID: \"e68fe67b-133d-4474-8f6d-a781bca954d7\") " pod="openshift-marketplace/redhat-operators-wl9lt" Jan 28 12:23:14 crc kubenswrapper[4685]: E0128 12:23:14.407571 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:14.90750357 +0000 UTC m=+145.994917405 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.407654 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e68fe67b-133d-4474-8f6d-a781bca954d7-catalog-content\") pod \"redhat-operators-wl9lt\" (UID: \"e68fe67b-133d-4474-8f6d-a781bca954d7\") " pod="openshift-marketplace/redhat-operators-wl9lt" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.407737 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.407863 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e68fe67b-133d-4474-8f6d-a781bca954d7-utilities\") pod \"redhat-operators-wl9lt\" (UID: \"e68fe67b-133d-4474-8f6d-a781bca954d7\") " pod="openshift-marketplace/redhat-operators-wl9lt" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.407942 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bjln\" (UniqueName: \"kubernetes.io/projected/e68fe67b-133d-4474-8f6d-a781bca954d7-kube-api-access-6bjln\") pod \"redhat-operators-wl9lt\" (UID: \"e68fe67b-133d-4474-8f6d-a781bca954d7\") " pod="openshift-marketplace/redhat-operators-wl9lt" Jan 28 12:23:14 crc kubenswrapper[4685]: E0128 12:23:14.408354 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:14.908341416 +0000 UTC m=+145.995755441 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.409021 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e68fe67b-133d-4474-8f6d-a781bca954d7-catalog-content\") pod \"redhat-operators-wl9lt\" (UID: \"e68fe67b-133d-4474-8f6d-a781bca954d7\") " pod="openshift-marketplace/redhat-operators-wl9lt" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.430737 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bjln\" (UniqueName: \"kubernetes.io/projected/e68fe67b-133d-4474-8f6d-a781bca954d7-kube-api-access-6bjln\") pod \"redhat-operators-wl9lt\" (UID: \"e68fe67b-133d-4474-8f6d-a781bca954d7\") " pod="openshift-marketplace/redhat-operators-wl9lt" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.448066 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-lr8cn" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.489698 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.508874 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:14 crc kubenswrapper[4685]: E0128 12:23:14.509089 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:15.009056312 +0000 UTC m=+146.096470147 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.509219 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:14 crc kubenswrapper[4685]: E0128 12:23:14.509790 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:15.009763424 +0000 UTC m=+146.097177259 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.538657 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wl9lt" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.589978 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.590055 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.590292 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.590327 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.611488 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.611601 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5zhxv"] Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.612939 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5zhxv" Jan 28 12:23:14 crc kubenswrapper[4685]: E0128 12:23:14.613191 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:15.113145223 +0000 UTC m=+146.200559068 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.625126 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5zhxv"] Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.667501 4685 patch_prober.go:28] interesting pod/console-f9d7485db-z2lzl container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.667599 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-z2lzl" podUID="08663495-9331-4b6f-b82a-67b308a9afa3" containerName="console" probeResult="failure" output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.667647 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.671657 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.671691 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.676467 4685 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-pwbtq container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.11:8443/livez\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.676543 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" podUID="efb1db38-b6e2-4a3a-b26b-a8d8d723f7b9" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.11:8443/livez\": dial tcp 10.217.0.11:8443: connect: connection refused" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.713787 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.713853 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhtx5\" (UniqueName: \"kubernetes.io/projected/6d558882-5a65-41ae-bcf0-d13c7cecc034-kube-api-access-jhtx5\") pod \"redhat-operators-5zhxv\" (UID: \"6d558882-5a65-41ae-bcf0-d13c7cecc034\") " pod="openshift-marketplace/redhat-operators-5zhxv" Jan 28 12:23:14 crc kubenswrapper[4685]: 
I0128 12:23:14.713981 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d558882-5a65-41ae-bcf0-d13c7cecc034-catalog-content\") pod \"redhat-operators-5zhxv\" (UID: \"6d558882-5a65-41ae-bcf0-d13c7cecc034\") " pod="openshift-marketplace/redhat-operators-5zhxv" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.714148 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d558882-5a65-41ae-bcf0-d13c7cecc034-utilities\") pod \"redhat-operators-5zhxv\" (UID: \"6d558882-5a65-41ae-bcf0-d13c7cecc034\") " pod="openshift-marketplace/redhat-operators-5zhxv" Jan 28 12:23:14 crc kubenswrapper[4685]: E0128 12:23:14.715576 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:15.215558893 +0000 UTC m=+146.302972728 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.788136 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 12:23:14 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld Jan 28 12:23:14 crc kubenswrapper[4685]: [+]process-running ok Jan 28 12:23:14 crc kubenswrapper[4685]: healthz check failed Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.788235 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.815016 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.815434 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d558882-5a65-41ae-bcf0-d13c7cecc034-catalog-content\") pod \"redhat-operators-5zhxv\" (UID: \"6d558882-5a65-41ae-bcf0-d13c7cecc034\") " pod="openshift-marketplace/redhat-operators-5zhxv" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.815498 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d558882-5a65-41ae-bcf0-d13c7cecc034-utilities\") pod \"redhat-operators-5zhxv\" (UID: \"6d558882-5a65-41ae-bcf0-d13c7cecc034\") " 
pod="openshift-marketplace/redhat-operators-5zhxv" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.815578 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhtx5\" (UniqueName: \"kubernetes.io/projected/6d558882-5a65-41ae-bcf0-d13c7cecc034-kube-api-access-jhtx5\") pod \"redhat-operators-5zhxv\" (UID: \"6d558882-5a65-41ae-bcf0-d13c7cecc034\") " pod="openshift-marketplace/redhat-operators-5zhxv" Jan 28 12:23:14 crc kubenswrapper[4685]: E0128 12:23:14.816082 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:15.316057813 +0000 UTC m=+146.403471648 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.817521 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d558882-5a65-41ae-bcf0-d13c7cecc034-utilities\") pod \"redhat-operators-5zhxv\" (UID: \"6d558882-5a65-41ae-bcf0-d13c7cecc034\") " pod="openshift-marketplace/redhat-operators-5zhxv" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.818484 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d558882-5a65-41ae-bcf0-d13c7cecc034-catalog-content\") pod \"redhat-operators-5zhxv\" (UID: \"6d558882-5a65-41ae-bcf0-d13c7cecc034\") " pod="openshift-marketplace/redhat-operators-5zhxv" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.841016 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhtx5\" (UniqueName: \"kubernetes.io/projected/6d558882-5a65-41ae-bcf0-d13c7cecc034-kube-api-access-jhtx5\") pod \"redhat-operators-5zhxv\" (UID: \"6d558882-5a65-41ae-bcf0-d13c7cecc034\") " pod="openshift-marketplace/redhat-operators-5zhxv" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.887779 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-c5jnv" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.917511 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:14 crc kubenswrapper[4685]: E0128 12:23:14.918085 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:15.418058629 +0000 UTC m=+146.505472454 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.933231 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5zhxv" Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.944392 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9q8q2" event={"ID":"8d93a170-5ad3-489b-b3be-7e3cc4201970","Type":"ContainerStarted","Data":"2559962c18c92ed9826fb516da83471266e9347e292f2e936859d5d88d5cde74"} Jan 28 12:23:14 crc kubenswrapper[4685]: I0128 12:23:14.965021 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-s4474" podStartSLOduration=110.964999307 podStartE2EDuration="1m50.964999307s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:14.964458331 +0000 UTC m=+146.051872166" watchObservedRunningTime="2026-01-28 12:23:14.964999307 +0000 UTC m=+146.052413142" Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.018636 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:15 crc kubenswrapper[4685]: E0128 12:23:15.018787 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:15.518765486 +0000 UTC m=+146.606179321 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.018947 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.019481 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wl9lt"] Jan 28 12:23:15 crc kubenswrapper[4685]: E0128 12:23:15.020092 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:15.520077976 +0000 UTC m=+146.607491811 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.122474 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:15 crc kubenswrapper[4685]: E0128 12:23:15.122766 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:15.622737883 +0000 UTC m=+146.710151718 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.123014 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:15 crc kubenswrapper[4685]: E0128 12:23:15.123371 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:15.623362712 +0000 UTC m=+146.710776547 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.151545 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6wksk"
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.204910 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-h6gqk"
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.223892 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:15 crc kubenswrapper[4685]: E0128 12:23:15.224672 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:15.724642787 +0000 UTC m=+146.812056622 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.243631 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr"
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.308060 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.427580 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/81539868-3464-4bfe-8a42-08c54ea1b0df-kubelet-dir\") pod \"81539868-3464-4bfe-8a42-08c54ea1b0df\" (UID: \"81539868-3464-4bfe-8a42-08c54ea1b0df\") "
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.427654 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/81539868-3464-4bfe-8a42-08c54ea1b0df-kube-api-access\") pod \"81539868-3464-4bfe-8a42-08c54ea1b0df\" (UID: \"81539868-3464-4bfe-8a42-08c54ea1b0df\") "
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.428266 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/81539868-3464-4bfe-8a42-08c54ea1b0df-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "81539868-3464-4bfe-8a42-08c54ea1b0df" (UID: "81539868-3464-4bfe-8a42-08c54ea1b0df"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.434097 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81539868-3464-4bfe-8a42-08c54ea1b0df-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "81539868-3464-4bfe-8a42-08c54ea1b0df" (UID: "81539868-3464-4bfe-8a42-08c54ea1b0df"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.463085 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5zhxv"]
Jan 28 12:23:15 crc kubenswrapper[4685]: W0128 12:23:15.470459 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6d558882_5a65_41ae_bcf0_d13c7cecc034.slice/crio-97fb83a0024b300d10e030c6bd7d8a962ca8f6edcda6becc26ca1cfa513066fb WatchSource:0}: Error finding container 97fb83a0024b300d10e030c6bd7d8a962ca8f6edcda6becc26ca1cfa513066fb: Status 404 returned error can't find the container with id 97fb83a0024b300d10e030c6bd7d8a962ca8f6edcda6becc26ca1cfa513066fb
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.528962 4685 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/81539868-3464-4bfe-8a42-08c54ea1b0df-kubelet-dir\") on node \"crc\" DevicePath \"\""
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.528974 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/81539868-3464-4bfe-8a42-08c54ea1b0df-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.628670 4685 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-dwzsq container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body=
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.628761 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" podUID="75a32638-e479-4025-abd5-d00533347443" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused"
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.628979 4685 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-dwzsq container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body=
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.629018 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq" podUID="75a32638-e479-4025-abd5-d00533347443" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused"
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.790160 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:15 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:15 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:15 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.790749 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.950736 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.950767 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"81539868-3464-4bfe-8a42-08c54ea1b0df","Type":"ContainerDied","Data":"f8efed0309bb0ffb9aa76b2ebbb47734ffc85da2af0bdc25cd9d17c8a2125435"}
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.950806 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f8efed0309bb0ffb9aa76b2ebbb47734ffc85da2af0bdc25cd9d17c8a2125435"
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.952549 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gvfhc" event={"ID":"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2","Type":"ContainerStarted","Data":"ddf9800b30801c563636b6a0a99b41cb984630b254c09cc2863cbee51ce0921e"}
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.953435 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5zhxv" event={"ID":"6d558882-5a65-41ae-bcf0-d13c7cecc034","Type":"ContainerStarted","Data":"97fb83a0024b300d10e030c6bd7d8a962ca8f6edcda6becc26ca1cfa513066fb"}
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.954509 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wl9lt" event={"ID":"e68fe67b-133d-4474-8f6d-a781bca954d7","Type":"ContainerStarted","Data":"af175ec1973081f50e23fa6ed31f3e80a857d9999a0dc987e18dd26963641541"}
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.956301 4685 generic.go:334] "Generic (PLEG): container finished" podID="8d93a170-5ad3-489b-b3be-7e3cc4201970" containerID="2559962c18c92ed9826fb516da83471266e9347e292f2e936859d5d88d5cde74" exitCode=0
Jan 28 12:23:15 crc kubenswrapper[4685]: I0128 12:23:15.956349 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9q8q2" event={"ID":"8d93a170-5ad3-489b-b3be-7e3cc4201970","Type":"ContainerDied","Data":"2559962c18c92ed9826fb516da83471266e9347e292f2e936859d5d88d5cde74"}
Jan 28 12:23:16 crc kubenswrapper[4685]: I0128 12:23:16.786490 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:16 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:16 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:16 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:16 crc kubenswrapper[4685]: I0128 12:23:16.786558 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:16 crc kubenswrapper[4685]: I0128 12:23:16.964920 4685 generic.go:334] "Generic (PLEG): container finished" podID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" containerID="ddf9800b30801c563636b6a0a99b41cb984630b254c09cc2863cbee51ce0921e" exitCode=0
Jan 28 12:23:16 crc kubenswrapper[4685]: I0128 12:23:16.965013 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gvfhc" event={"ID":"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2","Type":"ContainerDied","Data":"ddf9800b30801c563636b6a0a99b41cb984630b254c09cc2863cbee51ce0921e"}
Jan 28 12:23:16 crc kubenswrapper[4685]: I0128 12:23:16.967084 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"ec91605c-e13c-49be-9329-217decae5afd","Type":"ContainerStarted","Data":"f4cf70fe8cb9bf9cb51eea09542288d92bf81e117bde3bd84c970c34e7352de0"}
Jan 28 12:23:16 crc kubenswrapper[4685]: I0128 12:23:16.968538 4685 generic.go:334] "Generic (PLEG): container finished" podID="7d2d54a9-9f8c-4158-8551-c4351bca8c19" containerID="17b0fab2348cedde9986db84f51154c822881352f42ae9400db8293a35813a99" exitCode=0
Jan 28 12:23:16 crc kubenswrapper[4685]: I0128 12:23:16.968608 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntg69" event={"ID":"7d2d54a9-9f8c-4158-8551-c4351bca8c19","Type":"ContainerDied","Data":"17b0fab2348cedde9986db84f51154c822881352f42ae9400db8293a35813a99"}
Jan 28 12:23:16 crc kubenswrapper[4685]: I0128 12:23:16.970424 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-924cb" event={"ID":"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb","Type":"ContainerStarted","Data":"2ce337ce2726e42df63521ff2089593518785fdc7d0bc0ffcd114b38f8966b10"}
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:17 crc kubenswrapper[4685]: I0128 12:23:17.159265 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:17 crc kubenswrapper[4685]: E0128 12:23:17.159615 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:17.659606117 +0000 UTC m=+148.747019952 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:17 crc kubenswrapper[4685]: I0128 12:23:17.260820 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:17 crc kubenswrapper[4685]: E0128 12:23:17.261059 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:17.761012775 +0000 UTC m=+148.848426650 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:17 crc kubenswrapper[4685]: I0128 12:23:17.261326 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:17 crc kubenswrapper[4685]: E0128 12:23:17.261806 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:17.761789179 +0000 UTC m=+148.849203064 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:17 crc kubenswrapper[4685]: I0128 12:23:17.362585 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:17 crc kubenswrapper[4685]: E0128 12:23:17.362884 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:17.862842946 +0000 UTC m=+148.950256821 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:17 crc kubenswrapper[4685]: I0128 12:23:17.363069 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:17 crc kubenswrapper[4685]: E0128 12:23:17.363655 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:17.86363657 +0000 UTC m=+148.951050445 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:17 crc kubenswrapper[4685]: I0128 12:23:17.464467 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:17 crc kubenswrapper[4685]: E0128 12:23:17.464724 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:17.964685998 +0000 UTC m=+149.052099863 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
[the same MountVolume/UnmountVolume pair fails identically, re-arming a fresh 500ms backoff each time, at roughly 100ms intervals from 12:23:17.465 through 12:23:17.771]
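All of the mount and unmount failures above bottom out in one lookup: the kubelet cannot construct a CSI client because kubevirt.io.hostpath-provisioner has not yet registered with it. A minimal Go sketch of that failure mode, with an in-memory map standing in for the kubelet's plugin registry (the map, function name, and wiring here are illustrative assumptions, not the kubelet's actual internals):

    package main

    import "fmt"

    // registeredDrivers stands in for the kubelet's table of CSI plugins; on a
    // real node it is populated as each driver registers over the
    // plugin-registration socket. Contents here are illustrative only.
    var registeredDrivers = map[string]string{
        // "kubevirt.io.hostpath-provisioner" would appear here once the
        // hostpath-provisioner's node plugin has come up and registered.
    }

    // newCsiDriverClient mimics the lookup whose failure appears in the records
    // above: with no registration there is no client, so both MountDevice and
    // TearDownAt fail before any RPC is even attempted.
    func newCsiDriverClient(driverName string) (string, error) {
        endpoint, ok := registeredDrivers[driverName]
        if !ok {
            return "", fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driverName)
        }
        return endpoint, nil
    }

    func main() {
        if _, err := newCsiDriverClient("kubevirt.io.hostpath-provisioner"); err != nil {
            fmt.Println("attacher.MountDevice failed to create newCsiDriverClient:", err)
        }
    }

Once the driver's node plugin registers, the same lookup succeeds and the pending operations clear on a later retry, which is why the kubelet keeps re-queuing rather than giving up.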
Jan 28 12:23:17 crc kubenswrapper[4685]: I0128 12:23:17.788055 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:17 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:17 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:17 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:17 crc kubenswrapper[4685]: I0128 12:23:17.788151 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
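The two router records above show the shape of a kubelet HTTP startup probe: any status outside the 2xx/3xx range is a failure, and the head of the response body is carried into the log as start-of-body=. A self-contained sketch under those assumptions, using a stub server in place of the router's healthz endpoint (all names and the 1024-byte cap are illustrative):

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "net/http/httptest"
    )

    // probeHTTP sketches a kubelet-style HTTP probe: codes in [200,400)
    // succeed, anything else fails, and only the head of the body is kept
    // for the "start-of-body=" fragment seen in the log.
    func probeHTTP(url string) (ok bool, startOfBody string, err error) {
        resp, err := http.Get(url)
        if err != nil {
            return false, "", err
        }
        defer resp.Body.Close()
        head, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
        return resp.StatusCode >= 200 && resp.StatusCode < 400, string(head), nil
    }

    func main() {
        // Stub healthz endpoint mimicking the router output above: two
        // checks failing, one passing, overall result 500.
        srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
            w.WriteHeader(http.StatusInternalServerError)
            io.WriteString(w, "[-]backend-http failed: reason withheld\n[-]has-synced failed: reason withheld\n[+]process-running ok\nhealthz check failed\n")
        }))
        defer srv.Close()

        ok, body, err := probeHTTP(srv.URL)
        fmt.Printf("ok=%v err=%v\nstart-of-body=%s", ok, err, body)
    }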
[the retry pair fails again at 12:23:17.873 and 12:23:17.975, re-arming the 500ms backoff each time]
Jan 28 12:23:17 crc kubenswrapper[4685]: I0128 12:23:17.978465 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-smscz" event={"ID":"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01","Type":"ContainerStarted","Data":"5c353ad8e7249cc71fa179838d756701d0cbe36903c47ceb51d14a85144186a1"}
[the identical MountVolume/UnmountVolume failure pair repeats at roughly 100ms intervals from 12:23:18.077 through 12:23:18.689, each failure re-arming a fresh 500ms backoff]
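Each failure above parks the operation in the kubelet's pending-operations table: the logged "No retries permitted until" instant is simply the failure time plus the 500ms durationBeforeRetry, so the reconciler's ~100ms passes are rejected without doing any work until the window expires. A small sketch of that gate (the types and fixed constant are illustrative; the kubelet's real helper can also grow the backoff on repeated failures):

    package main

    import (
        "fmt"
        "time"
    )

    // durationBeforeRetry matches the 500ms visible in every
    // nestedpendingoperations record in this window.
    const durationBeforeRetry = 500 * time.Millisecond

    // pendingOp is an illustrative stand-in for an entry in the kubelet's
    // pending-operations table.
    type pendingOp struct {
        lastErrorTime time.Time
    }

    // mayRetry reports whether the operation may run now and, if not, the
    // "No retries permitted until" instant that would be logged.
    func (o pendingOp) mayRetry(now time.Time) (bool, time.Time) {
        retryAt := o.lastErrorTime.Add(durationBeforeRetry)
        return !now.Before(retryAt), retryAt
    }

    func main() {
        failed := time.Date(2026, time.January, 28, 12, 23, 18, 179772000, time.UTC)
        op := pendingOp{lastErrorTime: failed}

        // A reconciler pass 100ms later is still inside the backoff window.
        ok, retryAt := op.mayRetry(failed.Add(100 * time.Millisecond))
        fmt.Printf("retry now? %v; no retries permitted until %s\n",
            ok, retryAt.UTC().Format("2006-01-02 15:04:05.999999999 -0700 MST"))
    }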
Jan 28 12:23:18 crc kubenswrapper[4685]: I0128 12:23:18.787231 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:18 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:18 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:18 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:18 crc kubenswrapper[4685]: I0128 12:23:18.787316 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
[the retry pair fails again at 12:23:18.790 and 12:23:18.892]
Jan 28 12:23:18 crc kubenswrapper[4685]: I0128 12:23:18.894630 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dwzsq"
Jan 28 12:23:18 crc kubenswrapper[4685]: I0128 12:23:18.911924 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" podStartSLOduration=115.91190486 podStartE2EDuration="1m55.91190486s" podCreationTimestamp="2026-01-28 12:21:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:17.004295695 +0000 UTC m=+148.091709550" watchObservedRunningTime="2026-01-28 12:23:18.91190486 +0000 UTC m=+149.999318695"
Jan 28 12:23:18 crc kubenswrapper[4685]: I0128 12:23:18.985106 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8cf72" event={"ID":"424bdf45-4dcb-4b13-b68f-e55e115238bb","Type":"ContainerStarted","Data":"7c8b0d137592613182f0105ce97152903db18daa84807d3c6dcd9c1d8be01589"}
Jan 28 12:23:18 crc kubenswrapper[4685]: I0128 12:23:18.986757 4685 generic.go:334] "Generic (PLEG): container finished" podID="cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" containerID="2ce337ce2726e42df63521ff2089593518785fdc7d0bc0ffcd114b38f8966b10" exitCode=0
Jan 28 12:23:18 crc kubenswrapper[4685]: I0128 12:23:18.986841 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-924cb" event={"ID":"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb","Type":"ContainerDied","Data":"2ce337ce2726e42df63521ff2089593518785fdc7d0bc0ffcd114b38f8966b10"}
Jan 28 12:23:18 crc kubenswrapper[4685]: I0128 12:23:18.988203 4685 generic.go:334] "Generic (PLEG): container finished" podID="e4bd8edd-83cd-4485-b7ad-b13d5aa53a01" containerID="5c353ad8e7249cc71fa179838d756701d0cbe36903c47ceb51d14a85144186a1" exitCode=0
Jan 28 12:23:18 crc kubenswrapper[4685]: I0128 12:23:18.988296 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-smscz" event={"ID":"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01","Type":"ContainerDied","Data":"5c353ad8e7249cc71fa179838d756701d0cbe36903c47ceb51d14a85144186a1"}
Jan 28 12:23:18 crc kubenswrapper[4685]: I0128 12:23:18.990374 4685 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
[the retry pair fails again at 12:23:18.994]
[retries continue with the same pair of errors at 12:23:19.095, 12:23:19.196, and 12:23:19.298]
Jan 28 12:23:19 crc kubenswrapper[4685]: I0128 12:23:19.354372 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-zp7xc"
Jan 28 12:23:19 crc kubenswrapper[4685]: I0128 12:23:19.354424 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-zp7xc"
Jan 28 12:23:19 crc kubenswrapper[4685]: I0128 12:23:19.355691 4685 patch_prober.go:28] interesting pod/apiserver-76f77b778f-zp7xc container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.22:8443/livez\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body=
Jan 28 12:23:19 crc kubenswrapper[4685]: I0128 12:23:19.355735 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" podUID="b22a7729-d10a-412c-8df1-30992ba607b0" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.22:8443/livez\": dial tcp 10.217.0.22:8443: connect: connection refused"
[the retry pair fails again at 12:23:19.399]
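Unlike the router's HTTP 500s, the openshift-apiserver probe above fails one layer lower: nothing is listening on 10.217.0.22:8443 yet, so the TCP dial itself is refused and there is no status code or body at all, which is why start-of-body= is empty. A sketch of that failure class against a deliberately closed local port (the port choice is an assumption for illustration):

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        client := &http.Client{Timeout: 2 * time.Second}
        // Port 9 (discard) is almost never open, so the GET fails during the
        // TCP handshake: the same "connect: connection refused" class the
        // kubelet logs for https://10.217.0.22:8443/livez above.
        _, err := client.Get("http://127.0.0.1:9/livez")
        fmt.Println("probe failed:", err)
    }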
No retries permitted until 2026-01-28 12:23:19.900468262 +0000 UTC m=+150.987882177 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:19 crc kubenswrapper[4685]: I0128 12:23:19.500860 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:19 crc kubenswrapper[4685]: E0128 12:23:19.501216 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:20.001142757 +0000 UTC m=+151.088556622 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:19 crc kubenswrapper[4685]: I0128 12:23:19.501952 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:19 crc kubenswrapper[4685]: E0128 12:23:19.502526 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:20.002500509 +0000 UTC m=+151.089914384 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:19 crc kubenswrapper[4685]: I0128 12:23:19.603513 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:19 crc kubenswrapper[4685]: E0128 12:23:19.604071 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:20.104042811 +0000 UTC m=+151.191456686 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:19 crc kubenswrapper[4685]: I0128 12:23:19.676531 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:23:19 crc kubenswrapper[4685]: I0128 12:23:19.685109 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-pwbtq" Jan 28 12:23:19 crc kubenswrapper[4685]: I0128 12:23:19.706541 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:19 crc kubenswrapper[4685]: E0128 12:23:19.707301 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:20.207268866 +0000 UTC m=+151.294682751 (durationBeforeRetry 500ms). 
Jan 28 12:23:19 crc kubenswrapper[4685]: I0128 12:23:19.790983 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:19 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:19 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:19 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:19 crc kubenswrapper[4685]: I0128 12:23:19.791077 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:19 crc kubenswrapper[4685]: I0128 12:23:19.808464 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:19 crc kubenswrapper[4685]: E0128 12:23:19.808772 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:20.308713125 +0000 UTC m=+151.396127000 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:19 crc kubenswrapper[4685]: I0128 12:23:19.809234 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:19 crc kubenswrapper[4685]: E0128 12:23:19.812021 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:20.311999516 +0000 UTC m=+151.399413371 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
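The router probe records above carry a standard healthz-style body: one `[+]`/`[-]` line per named check and an overall failure trailer, served with status 500 while any check fails. A self-contained sketch of a handler producing that shape (check names taken from the log; this is not the actual router implementation):

```go
package main

import (
	"fmt"
	"net/http"
)

// check is one named readiness condition, e.g. "backend-http" or "has-synced".
type check struct {
	name string
	ok   func() bool
}

// healthz aggregates checks the way the probe output in this log does:
// "[-]name failed" / "[+]name ok" per line, 500 overall if any failed.
func healthz(checks []check) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		failed := false
		body := ""
		for _, c := range checks {
			if c.ok() {
				body += fmt.Sprintf("[+]%s ok\n", c.name)
			} else {
				body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
				failed = true
			}
		}
		if failed {
			body += "healthz check failed\n"
			w.WriteHeader(http.StatusInternalServerError) // the probe's "statuscode: 500"
		}
		fmt.Fprint(w, body)
	}
}

func main() {
	http.HandleFunc("/healthz", healthz([]check{
		{"backend-http", func() bool { return false }},
		{"has-synced", func() bool { return false }},
		{"process-running", func() bool { return true }},
	}))
	http.ListenAndServe(":8080", nil)
}
```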
Jan 28 12:23:19 crc kubenswrapper[4685]: I0128 12:23:19.994471 4685 generic.go:334] "Generic (PLEG): container finished" podID="424bdf45-4dcb-4b13-b68f-e55e115238bb" containerID="7c8b0d137592613182f0105ce97152903db18daa84807d3c6dcd9c1d8be01589" exitCode=0
Jan 28 12:23:19 crc kubenswrapper[4685]: I0128 12:23:19.994554 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8cf72" event={"ID":"424bdf45-4dcb-4b13-b68f-e55e115238bb","Type":"ContainerDied","Data":"7c8b0d137592613182f0105ce97152903db18daa84807d3c6dcd9c1d8be01589"}
Jan 28 12:23:19 crc kubenswrapper[4685]: I0128 12:23:19.995844 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5zhxv" event={"ID":"6d558882-5a65-41ae-bcf0-d13c7cecc034","Type":"ContainerStarted","Data":"534fdaede1d8e595c2d352c6ef262c9ec54acb2f9a0fa46e80b8d010e711e97a"}
Jan 28 12:23:20 crc kubenswrapper[4685]: I0128 12:23:20.011919 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:20 crc kubenswrapper[4685]: E0128 12:23:20.012117 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:20.512085109 +0000 UTC m=+151.599498944 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:20 crc kubenswrapper[4685]: I0128 12:23:20.012520 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:20 crc kubenswrapper[4685]: E0128 12:23:20.012939 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:20.512928465 +0000 UTC m=+151.600342380 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
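The `Generic (PLEG)` and `SyncLoop (PLEG)` records come from the Pod Lifecycle Event Generator: it periodically relists containers from the runtime, diffs the result against its cache, and hands `ContainerStarted`/`ContainerDied` events to the kubelet sync loop, which is why each marketplace pod's short-lived step appears as a ContainerStarted followed by a ContainerDied with exitCode=0. A toy relist-and-diff in Go (types and names are illustrative, not kubelet's API):

```go
package main

import "fmt"

// podLifecycleEvent mirrors the fields visible in the log records:
// the pod, the event type, and the container ID carried as data.
type podLifecycleEvent struct {
	Pod  string
	Type string // "ContainerStarted" or "ContainerDied"
	Data string // container ID
}

// relist diffs the previous and current running-container sets and
// emits one event per transition, as the PLEG does on each poll.
func relist(prev, curr map[string]string) []podLifecycleEvent {
	var events []podLifecycleEvent
	for id, pod := range curr {
		if _, existed := prev[id]; !existed {
			events = append(events, podLifecycleEvent{pod, "ContainerStarted", id})
		}
	}
	for id, pod := range prev {
		if _, running := curr[id]; !running {
			events = append(events, podLifecycleEvent{pod, "ContainerDied", id})
		}
	}
	return events
}

func main() {
	// Hypothetical container IDs; pod names taken from the log.
	prev := map[string]string{"c1": "openshift-marketplace/redhat-operators-5zhxv"}
	curr := map[string]string{"c2": "openshift-marketplace/redhat-operators-wl9lt"}
	for _, e := range relist(prev, curr) {
		fmt.Printf("SyncLoop (PLEG): event for pod %s: %s %s\n", e.Pod, e.Type, e.Data)
	}
}
```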
[... 12:23:20.113322 – 12:23:20.731963: the same operationExecutor.UnmountVolume/MountVolume pair fails roughly every 100ms with the identical "driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers" error; duplicate records trimmed ...]
Jan 28 12:23:20 crc kubenswrapper[4685]: I0128 12:23:20.785325 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:20 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:20 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:20 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:20 crc kubenswrapper[4685]: I0128 12:23:20.785416 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:20 crc kubenswrapper[4685]: I0128 12:23:20.832855 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:20 crc kubenswrapper[4685]: E0128 12:23:20.832995 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:21.332966291 +0000 UTC m=+152.420380126 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:20 crc kubenswrapper[4685]: I0128 12:23:20.833214 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:20 crc kubenswrapper[4685]: E0128 12:23:20.833525 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:21.333512148 +0000 UTC m=+152.420925983 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:21 crc kubenswrapper[4685]: I0128 12:23:21.004616 4685 generic.go:334] "Generic (PLEG): container finished" podID="ec91605c-e13c-49be-9329-217decae5afd" containerID="f4cf70fe8cb9bf9cb51eea09542288d92bf81e117bde3bd84c970c34e7352de0" exitCode=0
Jan 28 12:23:21 crc kubenswrapper[4685]: I0128 12:23:21.004702 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"ec91605c-e13c-49be-9329-217decae5afd","Type":"ContainerDied","Data":"f4cf70fe8cb9bf9cb51eea09542288d92bf81e117bde3bd84c970c34e7352de0"}
Jan 28 12:23:21 crc kubenswrapper[4685]: I0128 12:23:21.007201 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wl9lt" event={"ID":"e68fe67b-133d-4474-8f6d-a781bca954d7","Type":"ContainerStarted","Data":"fb9f3f9ce72e59a48c65798902e12dc57ccf3e99e09deed7fe83657bd673634f"}
Jan 28 12:23:21 crc kubenswrapper[4685]: I0128 12:23:21.036796 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:21 crc kubenswrapper[4685]: E0128 12:23:21.036973 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:21.536949844 +0000 UTC m=+152.624363679 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:21 crc kubenswrapper[4685]: I0128 12:23:21.037411 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:21 crc kubenswrapper[4685]: E0128 12:23:21.037842 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:21.537825251 +0000 UTC m=+152.625239086 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
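Every failed volume operation is parked by `nestedpendingoperations` until its `durationBeforeRetry` deadline passes, which is why the errors recur on a fixed cadence rather than in a hot loop. A sketch of that per-operation bookkeeping in Go (the 500ms base is taken from the log; the doubling policy below is an assumption, not kubelet's exact constants):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// backoff tracks, per operation key, when the next attempt is allowed.
// Assumed policy: 500ms base delay, doubling per consecutive failure.
type backoff struct {
	base     time.Duration
	delay    map[string]time.Duration
	notUntil map[string]time.Time
}

func newBackoff(base time.Duration) *backoff {
	return &backoff{base, map[string]time.Duration{}, map[string]time.Time{}}
}

// tryLater records a failure and parks the key for the current delay.
func (b *backoff) tryLater(key string, now time.Time) time.Duration {
	d, ok := b.delay[key]
	if !ok {
		d = b.base
	} else {
		d *= 2
	}
	b.delay[key] = d
	b.notUntil[key] = now.Add(d)
	return d
}

// allowed is what the reconciler consults before re-queuing the operation.
func (b *backoff) allowed(key string, now time.Time) error {
	if t, ok := b.notUntil[key]; ok && now.Before(t) {
		return errors.New("no retries permitted until " + t.Format(time.RFC3339Nano))
	}
	return nil
}

func main() {
	b := newBackoff(500 * time.Millisecond)
	key := "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db"
	now := time.Now()
	fmt.Println("retry in", b.tryLater(key, now))       // 500ms, like the log
	fmt.Println(b.allowed(key, now))                    // still parked
	fmt.Println(b.allowed(key, now.Add(time.Second)))   // nil: window passed
}
```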
[... 12:23:21.138792 – 12:23:21.751269: the identical unmount/mount retry cycle continues against the still-unregistered driver; duplicate records trimmed ...]
Jan 28 12:23:21 crc kubenswrapper[4685]: I0128 12:23:21.786827 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:21 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:21 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:21 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:21 crc kubenswrapper[4685]: I0128 12:23:21.786914 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:21 crc kubenswrapper[4685]: I0128 12:23:21.852281 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 12:23:21 crc kubenswrapper[4685]: E0128 12:23:21.852520 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:22.35248448 +0000 UTC m=+153.439898325 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:21 crc kubenswrapper[4685]: I0128 12:23:21.852768 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:21 crc kubenswrapper[4685]: E0128 12:23:21.853130 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:22.35311523 +0000 UTC m=+153.440529065 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 12:23:22 crc kubenswrapper[4685]: I0128 12:23:22.014184 4685 generic.go:334] "Generic (PLEG): container finished" podID="6d558882-5a65-41ae-bcf0-d13c7cecc034" containerID="534fdaede1d8e595c2d352c6ef262c9ec54acb2f9a0fa46e80b8d010e711e97a" exitCode=0
Jan 28 12:23:22 crc kubenswrapper[4685]: I0128 12:23:22.014289 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5zhxv" event={"ID":"6d558882-5a65-41ae-bcf0-d13c7cecc034","Type":"ContainerDied","Data":"534fdaede1d8e595c2d352c6ef262c9ec54acb2f9a0fa46e80b8d010e711e97a"}
Jan 28 12:23:22 crc kubenswrapper[4685]: I0128 12:23:22.016839 4685 generic.go:334] "Generic (PLEG): container finished" podID="e68fe67b-133d-4474-8f6d-a781bca954d7" containerID="fb9f3f9ce72e59a48c65798902e12dc57ccf3e99e09deed7fe83657bd673634f" exitCode=0
Jan 28 12:23:22 crc kubenswrapper[4685]: I0128 12:23:22.016964 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wl9lt" event={"ID":"e68fe67b-133d-4474-8f6d-a781bca954d7","Type":"ContainerDied","Data":"fb9f3f9ce72e59a48c65798902e12dc57ccf3e99e09deed7fe83657bd673634f"}
Jan 28 12:23:22 crc kubenswrapper[4685]: I0128 12:23:22.301197 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 12:23:22 crc kubenswrapper[4685]: I0128 12:23:22.363149 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ec91605c-e13c-49be-9329-217decae5afd-kubelet-dir\") pod \"ec91605c-e13c-49be-9329-217decae5afd\" (UID: \"ec91605c-e13c-49be-9329-217decae5afd\") "
Jan 28 12:23:22 crc kubenswrapper[4685]: I0128 12:23:22.363188 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ec91605c-e13c-49be-9329-217decae5afd-kube-api-access\") pod \"ec91605c-e13c-49be-9329-217decae5afd\" (UID: \"ec91605c-e13c-49be-9329-217decae5afd\") "
Jan 28 12:23:22 crc kubenswrapper[4685]: I0128 12:23:22.364376 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ec91605c-e13c-49be-9329-217decae5afd-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "ec91605c-e13c-49be-9329-217decae5afd" (UID: "ec91605c-e13c-49be-9329-217decae5afd"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 12:23:22 crc kubenswrapper[4685]: I0128 12:23:22.380423 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec91605c-e13c-49be-9329-217decae5afd-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "ec91605c-e13c-49be-9329-217decae5afd" (UID: "ec91605c-e13c-49be-9329-217decae5afd"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:23:22 crc kubenswrapper[4685]: I0128 12:23:22.465004 4685 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ec91605c-e13c-49be-9329-217decae5afd-kubelet-dir\") on node \"crc\" DevicePath \"\""
Jan 28 12:23:22 crc kubenswrapper[4685]: I0128 12:23:22.465014 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ec91605c-e13c-49be-9329-217decae5afd-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 28 12:23:22 crc kubenswrapper[4685]: I0128 12:23:22.788670 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:22 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:22 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:22 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:22 crc kubenswrapper[4685]: I0128 12:23:22.788751 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
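The router entries show the kubelet's HTTP startup probe at work: the health handler returned 500, the prober logged the start of the response body (the [-]/[+] check list), and the probe was recorded as failed. A rough model of that check follows, where any status outside 200-399 counts as failure; the real target comes from the container's startupProbe in the pod spec, so the port and path below are assumptions:

package main

import (
	"fmt"
	"io"
	"net/http"
)

// httpProbe mimics the pass/fail rule of a kubelet HTTP probe: 2xx/3xx is
// success, anything else (such as the 500 above) is failure.
func httpProbe(url string) (ok bool, body string) {
	resp, err := http.Get(url)
	if err != nil {
		return false, err.Error()
	}
	defer resp.Body.Close()
	// Keep only the start of the body, as the prober's log line does.
	b, _ := io.ReadAll(io.LimitReader(resp.Body, 256))
	return resp.StatusCode >= 200 && resp.StatusCode < 400, string(b)
}

func main() {
	// Hypothetical target; read the actual startupProbe from the router pod spec.
	ok, body := httpProbe("http://localhost:1936/healthz/ready")
	fmt.Printf("probe ok=%v start-of-body=%q\n", ok, body)
}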
Jan 28 12:23:23 crc kubenswrapper[4685]: I0128 12:23:23.024483 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 12:23:23 crc kubenswrapper[4685]: I0128 12:23:23.024468 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"ec91605c-e13c-49be-9329-217decae5afd","Type":"ContainerDied","Data":"127f4c1a8a1f1ed8f332b9fd52d8f5ba12fd7321de28c19fb48b0f1695f52795"}
Jan 28 12:23:23 crc kubenswrapper[4685]: I0128 12:23:23.024624 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="127f4c1a8a1f1ed8f332b9fd52d8f5ba12fd7321de28c19fb48b0f1695f52795"
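The SyncLoop (PLEG) entries here and above carry events of the shape {ID, Type, Data}: the pod UID, a lifecycle event type, and a container or sandbox ID. That is how the dead sandbox 127f4c1a... is tied back to revision-pruner-8-crc. A sketch of that shape, with field names read off the log output rather than taken from kubelet source:

package main

import "fmt"

// PodLifecycleEvent mirrors the {ID, Type, Data} triples printed in the
// SyncLoop (PLEG) lines; the field names are assumptions based on the log.
type PodLifecycleEvent struct {
	ID   string // pod UID
	Type string // e.g. "ContainerDied", "ContainerStarted"
	Data string // container or sandbox ID
}

func main() {
	ev := PodLifecycleEvent{
		ID:   "ec91605c-e13c-49be-9329-217decae5afd",
		Type: "ContainerDied",
		Data: "127f4c1a8a1f1ed8f332b9fd52d8f5ba12fd7321de28c19fb48b0f1695f52795",
	}
	fmt.Printf("SyncLoop (PLEG): event for pod %s: %s %s\n", ev.ID, ev.Type, ev.Data)
}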
Jan 28 12:23:23 crc kubenswrapper[4685]: I0128 12:23:23.785979 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:23 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:23 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:23 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:23 crc kubenswrapper[4685]: I0128 12:23:23.786060 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.034494 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" event={"ID":"54c1da50-0209-4285-bc15-91427e15241d","Type":"ContainerStarted","Data":"a650f387aa6012d3958457ddce5c66c93325eff0c0e476ca4e00f52454f5d9a2"}
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.300293 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:24 crc kubenswrapper[4685]: E0128 12:23:24.300887 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:24.800865079 +0000 UTC m=+155.888278944 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.367091 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.374238 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-zp7xc" Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.401539 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:24 crc kubenswrapper[4685]: E0128 12:23:24.401869 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:24.901817073 +0000 UTC m=+155.989230968 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.402439 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:24 crc kubenswrapper[4685]: E0128 12:23:24.403278 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:24.903237417 +0000 UTC m=+155.990651322 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.504376 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:24 crc kubenswrapper[4685]: E0128 12:23:24.504613 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:25.004575893 +0000 UTC m=+156.091989748 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.504686 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:24 crc kubenswrapper[4685]: E0128 12:23:24.505078 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:25.005066698 +0000 UTC m=+156.092480533 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.589241 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.589309 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.589355 4685 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-54gh9" Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.589390 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.589470 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.589905 4685 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" 
containerStatusID={"Type":"cri-o","ID":"046d8b8d71a37120bd693016fed417e65b4f51ad4deb4d0f827b0dca8a2e2f94"} pod="openshift-console/downloads-7954f5f757-54gh9" containerMessage="Container download-server failed liveness probe, will be restarted" Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.589992 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" containerID="cri-o://046d8b8d71a37120bd693016fed417e65b4f51ad4deb4d0f827b0dca8a2e2f94" gracePeriod=2 Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.590118 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.590199 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.605507 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:24 crc kubenswrapper[4685]: E0128 12:23:24.605972 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:25.10595558 +0000 UTC m=+156.193369415 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.661652 4685 patch_prober.go:28] interesting pod/console-f9d7485db-z2lzl container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.661714 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-z2lzl" podUID="08663495-9331-4b6f-b82a-67b308a9afa3" containerName="console" probeResult="failure" output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused" Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.707377 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:24 crc kubenswrapper[4685]: E0128 12:23:24.707766 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:25.20775015 +0000 UTC m=+156.295163985 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.808853 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 12:23:24 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld Jan 28 12:23:24 crc kubenswrapper[4685]: [+]process-running ok Jan 28 12:23:24 crc kubenswrapper[4685]: healthz check failed Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.808903 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:24 crc kubenswrapper[4685]: E0128 12:23:24.809114 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:25.309061485 +0000 UTC m=+156.396475350 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.809206 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.809102 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 12:23:24 crc kubenswrapper[4685]: E0128 12:23:24.809696 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:25.309681424 +0000 UTC m=+156.397095259 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.910909 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:24 crc kubenswrapper[4685]: E0128 12:23:24.911102 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:25.411074312 +0000 UTC m=+156.498488137 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.911215 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:24 crc kubenswrapper[4685]: E0128 12:23:24.911562 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:25.411554597 +0000 UTC m=+156.498968432 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:24 crc kubenswrapper[4685]: I0128 12:23:24.927280 4685 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Jan 28 12:23:25 crc kubenswrapper[4685]: I0128 12:23:25.012320 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:25 crc kubenswrapper[4685]: E0128 12:23:25.012498 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:25.51246993 +0000 UTC m=+156.599883765 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:25 crc kubenswrapper[4685]: I0128 12:23:25.012783 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:25 crc kubenswrapper[4685]: E0128 12:23:25.013342 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:25.513318326 +0000 UTC m=+156.600732191 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:25 crc kubenswrapper[4685]: I0128 12:23:25.114009 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:25 crc kubenswrapper[4685]: E0128 12:23:25.114275 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:25.614235668 +0000 UTC m=+156.701649533 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:25 crc kubenswrapper[4685]: I0128 12:23:25.114777 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:25 crc kubenswrapper[4685]: E0128 12:23:25.115838 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:25.615819677 +0000 UTC m=+156.703233702 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:25 crc kubenswrapper[4685]: I0128 12:23:25.215930 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:25 crc kubenswrapper[4685]: E0128 12:23:25.216216 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:25.716140102 +0000 UTC m=+156.803553977 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:25 crc kubenswrapper[4685]: I0128 12:23:25.216396 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:25 crc kubenswrapper[4685]: E0128 12:23:25.216839 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:25.716818253 +0000 UTC m=+156.804232118 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:25 crc kubenswrapper[4685]: I0128 12:23:25.317711 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:25 crc kubenswrapper[4685]: E0128 12:23:25.317917 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 12:23:25.817890861 +0000 UTC m=+156.905304696 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:25 crc kubenswrapper[4685]: I0128 12:23:25.318836 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:25 crc kubenswrapper[4685]: E0128 12:23:25.319390 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 12:23:25.819367056 +0000 UTC m=+156.906780901 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dxl25" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 12:23:25 crc kubenswrapper[4685]: I0128 12:23:25.383229 4685 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-28T12:23:24.927305223Z","Handler":null,"Name":""} Jan 28 12:23:25 crc kubenswrapper[4685]: I0128 12:23:25.387753 4685 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Jan 28 12:23:25 crc kubenswrapper[4685]: I0128 12:23:25.387811 4685 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Jan 28 12:23:25 crc kubenswrapper[4685]: I0128 12:23:25.420868 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 12:23:25 crc kubenswrapper[4685]: I0128 12:23:25.434952 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 28 12:23:25 crc kubenswrapper[4685]: I0128 12:23:25.523202 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:23:25 crc kubenswrapper[4685]: I0128 12:23:25.785934 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 12:23:25 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld Jan 28 12:23:25 crc kubenswrapper[4685]: [+]process-running ok Jan 28 12:23:25 crc kubenswrapper[4685]: healthz check failed Jan 28 12:23:25 crc kubenswrapper[4685]: I0128 12:23:25.786223 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 12:23:26 crc kubenswrapper[4685]: I0128 12:23:26.048189 4685 generic.go:334] "Generic (PLEG): container finished" podID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerID="046d8b8d71a37120bd693016fed417e65b4f51ad4deb4d0f827b0dca8a2e2f94" exitCode=0 Jan 28 12:23:26 crc kubenswrapper[4685]: I0128 12:23:26.048300 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-54gh9" event={"ID":"79a36fbf-eb6c-442f-b6f0-d4a5f7435dde","Type":"ContainerDied","Data":"046d8b8d71a37120bd693016fed417e65b4f51ad4deb4d0f827b0dca8a2e2f94"} Jan 28 12:23:26 crc kubenswrapper[4685]: I0128 12:23:26.061547 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" event={"ID":"54c1da50-0209-4285-bc15-91427e15241d","Type":"ContainerStarted","Data":"66f2b682bbabba7f9a5a3097c489c48f8da5c7ff7f26d9ccd7e4dabaa1a50267"} Jan 28 12:23:26 crc kubenswrapper[4685]: I0128 12:23:26.553497 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Jan 28 12:23:26 crc kubenswrapper[4685]: I0128 12:23:26.767221 4685 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 28 12:23:26 crc kubenswrapper[4685]: I0128 12:23:26.767309 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:26 crc kubenswrapper[4685]: I0128 12:23:26.786382 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:26 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:26 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:26 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:26 crc kubenswrapper[4685]: I0128 12:23:26.786476 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:26 crc kubenswrapper[4685]: I0128 12:23:26.805723 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dxl25\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:26 crc kubenswrapper[4685]: I0128 12:23:26.858155 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:27 crc kubenswrapper[4685]: I0128 12:23:27.054367 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-dxl25"]
Jan 28 12:23:27 crc kubenswrapper[4685]: W0128 12:23:27.064816 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod46b82026_d586_40b2_ad5b_fc08674d7067.slice/crio-ddb43fb78c7d23ef980b6048567ed165fef5a9cf5560c0c6e5cb5f18f470338f WatchSource:0}: Error finding container ddb43fb78c7d23ef980b6048567ed165fef5a9cf5560c0c6e5cb5f18f470338f: Status 404 returned error can't find the container with id ddb43fb78c7d23ef980b6048567ed165fef5a9cf5560c0c6e5cb5f18f470338f
Jan 28 12:23:27 crc kubenswrapper[4685]: I0128 12:23:27.788002 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:27 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:27 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:27 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:27 crc kubenswrapper[4685]: I0128 12:23:27.788440 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:28 crc kubenswrapper[4685]: I0128 12:23:28.081151 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" event={"ID":"46b82026-d586-40b2-ad5b-fc08674d7067","Type":"ContainerStarted","Data":"a9a1cbdb362a1c93b929fa546efa46c12ded9a67a6833ef11a13ee18f15a575d"}
Jan 28 12:23:28 crc kubenswrapper[4685]: I0128 12:23:28.081226 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" event={"ID":"46b82026-d586-40b2-ad5b-fc08674d7067","Type":"ContainerStarted","Data":"ddb43fb78c7d23ef980b6048567ed165fef5a9cf5560c0c6e5cb5f18f470338f"}
Jan 28 12:23:28 crc kubenswrapper[4685]: I0128 12:23:28.081296 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:28 crc kubenswrapper[4685]: I0128 12:23:28.101865 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" event={"ID":"54c1da50-0209-4285-bc15-91427e15241d","Type":"ContainerStarted","Data":"19f07aa0fc3a4569d03eb2f3b5f2a982817b887f91055abf7989ee130ec23c05"}
Jan 28 12:23:28 crc kubenswrapper[4685]: I0128 12:23:28.104646 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-54gh9" event={"ID":"79a36fbf-eb6c-442f-b6f0-d4a5f7435dde","Type":"ContainerStarted","Data":"036948ada80734447b14666944452c8b85a7a4e1c083725f9af575b511ab4e29"}
Jan 28 12:23:28 crc kubenswrapper[4685]: I0128 12:23:28.114797 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" podStartSLOduration=124.114780299 podStartE2EDuration="2m4.114780299s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:28.111230839 +0000 UTC m=+159.198644674" watchObservedRunningTime="2026-01-28 12:23:28.114780299 +0000 UTC m=+159.202194134"
Jan 28 12:23:28 crc kubenswrapper[4685]: I0128 12:23:28.786630 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:28 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:28 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:28 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:28 crc kubenswrapper[4685]: I0128 12:23:28.787016 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:29 crc kubenswrapper[4685]: I0128 12:23:29.127765 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body=
Jan 28 12:23:29 crc kubenswrapper[4685]: I0128 12:23:29.127831 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused"
Jan 28 12:23:29 crc kubenswrapper[4685]: I0128 12:23:29.150067 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-xvxqg" podStartSLOduration=37.150036314 podStartE2EDuration="37.150036314s" podCreationTimestamp="2026-01-28 12:22:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:23:29.145771222 +0000 UTC m=+160.233185057" watchObservedRunningTime="2026-01-28 12:23:29.150036314 +0000 UTC m=+160.237450149"
Jan 28 12:23:29 crc kubenswrapper[4685]: I0128 12:23:29.788447 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:29 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:29 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:29 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:29 crc kubenswrapper[4685]: I0128 12:23:29.788536 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:29 crc kubenswrapper[4685]: I0128 12:23:29.850228 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7pxnw"]
Jan 28 12:23:29 crc kubenswrapper[4685]: I0128 12:23:29.850757 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" podUID="edb4a604-6b03-48d4-b5ab-09e266b5eef8" containerName="controller-manager" containerID="cri-o://e4554348b67369a8ade46f5a14ca7a0857f43281640faf12fe425e56d0d2f443" gracePeriod=30
Jan 28 12:23:29 crc kubenswrapper[4685]: I0128 12:23:29.864793 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r"]
Jan 28 12:23:29 crc kubenswrapper[4685]: I0128 12:23:29.865034 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" podUID="6d7a1a22-14a6-419e-b4f1-ebf636f248f9" containerName="route-controller-manager" containerID="cri-o://23e97a6f06efe0c2c8722fcc20556f395c12b349a256f9f59cd25aa343c8fd23" gracePeriod=30
Jan 28 12:23:30 crc kubenswrapper[4685]: I0128 12:23:30.787208 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:30 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:30 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:30 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:30 crc kubenswrapper[4685]: I0128 12:23:30.787331 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:31 crc kubenswrapper[4685]: I0128 12:23:31.146535 4685 generic.go:334] "Generic (PLEG): container finished" podID="edb4a604-6b03-48d4-b5ab-09e266b5eef8" containerID="e4554348b67369a8ade46f5a14ca7a0857f43281640faf12fe425e56d0d2f443" exitCode=0
Jan 28 12:23:31 crc kubenswrapper[4685]: I0128 12:23:31.146624 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" event={"ID":"edb4a604-6b03-48d4-b5ab-09e266b5eef8","Type":"ContainerDied","Data":"e4554348b67369a8ade46f5a14ca7a0857f43281640faf12fe425e56d0d2f443"}
Jan 28 12:23:31 crc kubenswrapper[4685]: I0128 12:23:31.149280 4685 generic.go:334] "Generic (PLEG): container finished" podID="6d7a1a22-14a6-419e-b4f1-ebf636f248f9" containerID="23e97a6f06efe0c2c8722fcc20556f395c12b349a256f9f59cd25aa343c8fd23" exitCode=0
Jan 28 12:23:31 crc kubenswrapper[4685]: I0128 12:23:31.149324 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" event={"ID":"6d7a1a22-14a6-419e-b4f1-ebf636f248f9","Type":"ContainerDied","Data":"23e97a6f06efe0c2c8722fcc20556f395c12b349a256f9f59cd25aa343c8fd23"}
Jan 28 12:23:31 crc kubenswrapper[4685]: I0128 12:23:31.442413 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 12:23:31 crc kubenswrapper[4685]: I0128 12:23:31.442500 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:23:31 crc kubenswrapper[4685]: I0128 12:23:31.442547 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:23:31 crc kubenswrapper[4685]: I0128 12:23:31.442609 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:23:31 crc kubenswrapper[4685]: I0128 12:23:31.450680 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:23:31 crc kubenswrapper[4685]: I0128 12:23:31.451221 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 12:23:31 crc kubenswrapper[4685]: I0128 12:23:31.454962 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:23:31 crc kubenswrapper[4685]: I0128 12:23:31.467729 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 12:23:31 crc kubenswrapper[4685]: I0128 12:23:31.471350 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:23:31 crc kubenswrapper[4685]: I0128 12:23:31.483890 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 12:23:31 crc kubenswrapper[4685]: I0128 12:23:31.496155 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 12:23:31 crc kubenswrapper[4685]: I0128 12:23:31.786945 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:31 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:31 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:31 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:31 crc kubenswrapper[4685]: I0128 12:23:31.787025 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:32 crc kubenswrapper[4685]: I0128 12:23:32.785951 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:32 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:32 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:32 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:32 crc kubenswrapper[4685]: I0128 12:23:32.786016 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:33 crc kubenswrapper[4685]: I0128 12:23:33.785720 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:33 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:33 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:33 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:33 crc kubenswrapper[4685]: I0128 12:23:33.785782 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:34 crc kubenswrapper[4685]: I0128 12:23:34.589716 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body=
Jan 28 12:23:34 crc kubenswrapper[4685]: I0128 12:23:34.589996 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused"
Jan 28 12:23:34 crc kubenswrapper[4685]: I0128 12:23:34.589717 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body=
Jan 28 12:23:34 crc kubenswrapper[4685]: I0128 12:23:34.590352 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused"
Jan 28 12:23:34 crc kubenswrapper[4685]: I0128 12:23:34.590552 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-54gh9"
Jan 28 12:23:34 crc kubenswrapper[4685]: I0128 12:23:34.591600 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body=
Jan 28 12:23:34 crc kubenswrapper[4685]: I0128 12:23:34.591643 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused"
Jan 28 12:23:34 crc kubenswrapper[4685]: I0128 12:23:34.661375 4685 patch_prober.go:28] interesting pod/console-f9d7485db-z2lzl container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body=
Jan 28 12:23:34 crc kubenswrapper[4685]: I0128 12:23:34.661663 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-z2lzl" podUID="08663495-9331-4b6f-b82a-67b308a9afa3" containerName="console" probeResult="failure" output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused"
Jan 28 12:23:34 crc kubenswrapper[4685]: I0128 12:23:34.786419 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:34 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:34 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:34 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:34 crc kubenswrapper[4685]: I0128 12:23:34.786510 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:35 crc kubenswrapper[4685]: I0128 12:23:35.487105 4685 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-7pxnw container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:8443/healthz\": context deadline exceeded" start-of-body=
Jan 28 12:23:35 crc kubenswrapper[4685]: I0128 12:23:35.487736 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" podUID="edb4a604-6b03-48d4-b5ab-09e266b5eef8" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.36:8443/healthz\": context deadline exceeded"
Jan 28 12:23:35 crc kubenswrapper[4685]: I0128 12:23:35.648557 4685 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-2966r container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 12:23:35 crc kubenswrapper[4685]: I0128 12:23:35.648629 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" podUID="6d7a1a22-14a6-419e-b4f1-ebf636f248f9" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 12:23:35 crc kubenswrapper[4685]: I0128 12:23:35.786605 4685 patch_prober.go:28] interesting pod/router-default-5444994796-xvw2d container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 12:23:35 crc kubenswrapper[4685]: [-]has-synced failed: reason withheld
Jan 28 12:23:35 crc kubenswrapper[4685]: [+]process-running ok
Jan 28 12:23:35 crc kubenswrapper[4685]: healthz check failed
Jan 28 12:23:35 crc kubenswrapper[4685]: I0128 12:23:35.787283 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xvw2d" podUID="4cd9e183-ef71-4bc0-af54-df333c728cc4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 12:23:36 crc kubenswrapper[4685]: I0128 12:23:36.786564 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-xvw2d"
Jan 28 12:23:36 crc kubenswrapper[4685]: I0128 12:23:36.789020 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-xvw2d"
Jan 28 12:23:44 crc kubenswrapper[4685]: I0128 12:23:44.590708 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body=
Jan 28 12:23:44 crc kubenswrapper[4685]: I0128 12:23:44.591584 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused"
Jan 28 12:23:44 crc kubenswrapper[4685]: I0128 12:23:44.590877 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body=
Jan 28 12:23:44 crc kubenswrapper[4685]: I0128 12:23:44.591928 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused"
Jan 28 12:23:44 crc kubenswrapper[4685]: I0128 12:23:44.673336 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-z2lzl"
Jan 28 12:23:44 crc kubenswrapper[4685]: I0128 12:23:44.677579 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-z2lzl"
Jan 28 12:23:45 crc kubenswrapper[4685]: I0128 12:23:45.139539 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-922md"
Jan 28 12:23:45 crc kubenswrapper[4685]: I0128 12:23:45.486789 4685 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-7pxnw container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 12:23:45 crc kubenswrapper[4685]: I0128 12:23:45.486972 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" podUID="edb4a604-6b03-48d4-b5ab-09e266b5eef8" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.36:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 12:23:45 crc kubenswrapper[4685]: I0128 12:23:45.651021 4685 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-2966r container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: i/o timeout" start-of-body=
Jan 28 12:23:45 crc kubenswrapper[4685]: I0128 12:23:45.651130 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" podUID="6d7a1a22-14a6-419e-b4f1-ebf636f248f9" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: i/o timeout"
Jan 28 12:23:46 crc kubenswrapper[4685]: I0128 12:23:46.880369 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-dxl25"
Jan 28 12:23:47 crc kubenswrapper[4685]: I0128 12:23:47.288907 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs\") pod \"network-metrics-daemon-5x4kp\" (UID: \"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\") " pod="openshift-multus/network-metrics-daemon-5x4kp"
Jan 28 12:23:47 crc kubenswrapper[4685]: I0128 12:23:47.303488 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5f0d7b7e-1577-4289-9043-ddf8dd9a48ef-metrics-certs\") pod \"network-metrics-daemon-5x4kp\" (UID: \"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef\") " pod="openshift-multus/network-metrics-daemon-5x4kp"
Jan 28 12:23:47 crc kubenswrapper[4685]: I0128 12:23:47.508573 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5x4kp"
Jan 28 12:23:53 crc kubenswrapper[4685]: I0128 12:23:53.023612 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Jan 28 12:23:53 crc kubenswrapper[4685]: E0128 12:23:53.025960 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec91605c-e13c-49be-9329-217decae5afd" containerName="pruner"
Jan 28 12:23:53 crc kubenswrapper[4685]: I0128 12:23:53.025984 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec91605c-e13c-49be-9329-217decae5afd" containerName="pruner"
Jan 28 12:23:53 crc kubenswrapper[4685]: E0128 12:23:53.026002 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81539868-3464-4bfe-8a42-08c54ea1b0df" containerName="pruner"
Jan 28 12:23:53 crc kubenswrapper[4685]: I0128 12:23:53.026011 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="81539868-3464-4bfe-8a42-08c54ea1b0df" containerName="pruner"
Jan 28 12:23:53 crc kubenswrapper[4685]: I0128 12:23:53.026189 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec91605c-e13c-49be-9329-217decae5afd" containerName="pruner"
Jan 28 12:23:53 crc kubenswrapper[4685]: I0128 12:23:53.026209 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="81539868-3464-4bfe-8a42-08c54ea1b0df" containerName="pruner"
Jan 28 12:23:53 crc kubenswrapper[4685]: I0128 12:23:53.028366 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 28 12:23:53 crc kubenswrapper[4685]: I0128 12:23:53.036119 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Jan 28 12:23:53 crc kubenswrapper[4685]: I0128 12:23:53.037771 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Jan 28 12:23:53 crc kubenswrapper[4685]: I0128 12:23:53.040460 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Jan 28 12:23:53 crc kubenswrapper[4685]: I0128 12:23:53.097932 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/589536ea-5968-4231-8569-231a05f2e7e5-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"589536ea-5968-4231-8569-231a05f2e7e5\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 28 12:23:53 crc kubenswrapper[4685]: I0128 12:23:53.098009 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/589536ea-5968-4231-8569-231a05f2e7e5-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"589536ea-5968-4231-8569-231a05f2e7e5\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 28 12:23:53 crc kubenswrapper[4685]: I0128 12:23:53.199013 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/589536ea-5968-4231-8569-231a05f2e7e5-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"589536ea-5968-4231-8569-231a05f2e7e5\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 28 12:23:53 crc kubenswrapper[4685]: I0128 12:23:53.199561 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName:
\"kubernetes.io/projected/589536ea-5968-4231-8569-231a05f2e7e5-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"589536ea-5968-4231-8569-231a05f2e7e5\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 12:23:53 crc kubenswrapper[4685]: I0128 12:23:53.199118 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/589536ea-5968-4231-8569-231a05f2e7e5-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"589536ea-5968-4231-8569-231a05f2e7e5\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 12:23:53 crc kubenswrapper[4685]: I0128 12:23:53.237992 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/589536ea-5968-4231-8569-231a05f2e7e5-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"589536ea-5968-4231-8569-231a05f2e7e5\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 12:23:53 crc kubenswrapper[4685]: I0128 12:23:53.364413 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 12:23:54 crc kubenswrapper[4685]: I0128 12:23:54.590951 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:23:54 crc kubenswrapper[4685]: I0128 12:23:54.591022 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:23:54 crc kubenswrapper[4685]: I0128 12:23:54.591060 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:23:54 crc kubenswrapper[4685]: I0128 12:23:54.591114 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:23:54 crc kubenswrapper[4685]: I0128 12:23:54.591239 4685 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-54gh9" Jan 28 12:23:54 crc kubenswrapper[4685]: I0128 12:23:54.591991 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:23:54 crc kubenswrapper[4685]: I0128 12:23:54.592193 4685 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"036948ada80734447b14666944452c8b85a7a4e1c083725f9af575b511ab4e29"} pod="openshift-console/downloads-7954f5f757-54gh9" containerMessage="Container download-server failed liveness probe, will be restarted" Jan 28 12:23:54 crc kubenswrapper[4685]: I0128 12:23:54.592245 4685 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" containerID="cri-o://036948ada80734447b14666944452c8b85a7a4e1c083725f9af575b511ab4e29" gracePeriod=2
Jan 28 12:23:54 crc kubenswrapper[4685]: I0128 12:23:54.592113 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused"
Jan 28 12:23:55 crc kubenswrapper[4685]: I0128 12:23:55.486838 4685 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-7pxnw container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:8443/healthz\": dial tcp 10.217.0.36:8443: i/o timeout (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 12:23:55 crc kubenswrapper[4685]: I0128 12:23:55.487298 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" podUID="edb4a604-6b03-48d4-b5ab-09e266b5eef8" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.36:8443/healthz\": dial tcp 10.217.0.36:8443: i/o timeout (Client.Timeout exceeded while awaiting headers)"
Jan 28 12:23:55 crc kubenswrapper[4685]: I0128 12:23:55.648780 4685 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-2966r container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 12:23:55 crc kubenswrapper[4685]: I0128 12:23:55.649040 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" podUID="6d7a1a22-14a6-419e-b4f1-ebf636f248f9" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.313688 4685 generic.go:334] "Generic (PLEG): container finished" podID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerID="036948ada80734447b14666944452c8b85a7a4e1c083725f9af575b511ab4e29" exitCode=0
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.313748 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-54gh9" event={"ID":"79a36fbf-eb6c-442f-b6f0-d4a5f7435dde","Type":"ContainerDied","Data":"036948ada80734447b14666944452c8b85a7a4e1c083725f9af575b511ab4e29"}
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.313791 4685 scope.go:117] "RemoveContainer" containerID="046d8b8d71a37120bd693016fed417e65b4f51ad4deb4d0f827b0dca8a2e2f94"
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.636609 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r"
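
The entries above are the complete liveness-restart sequence for download-server: the prober reports the failure, SyncLoop marks the container unhealthy, kuberuntime_manager records "failed liveness probe, will be restarted", and kuberuntime_container kills the container with gracePeriod=2, followed by the PLEG "ContainerDied" event and the RemoveContainer of the previous instance. For a liveness kill the grace period normally comes from the pod spec, so gracePeriod=2 suggests terminationGracePeriodSeconds: 2 on this pod; that is an inference, since the manifest itself is not in this log. A sketch of a pod spec that would behave this way (values assumed as noted):

    package main

    import (
        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
        grace := int64(2) // assumed: matches the gracePeriod=2 logged at the kill
        pod := corev1.Pod{
            ObjectMeta: metav1.ObjectMeta{Name: "downloads-example", Namespace: "openshift-console"},
            Spec: corev1.PodSpec{
                TerminationGracePeriodSeconds: &grace,
                // With RestartPolicy Always, a failed liveness probe restarts
                // the container in place instead of rescheduling the pod.
                RestartPolicy: corev1.RestartPolicyAlways,
                Containers: []corev1.Container{{
                    Name:  "download-server",
                    Image: "registry.example/downloads:latest", // placeholder, not the real image
                    LivenessProbe: &corev1.Probe{
                        ProbeHandler: corev1.ProbeHandler{
                            HTTPGet: &corev1.HTTPGetAction{Path: "/", Port: intstr.FromInt(8080)},
                        },
                        PeriodSeconds:    10, // assumed from the failure cadence above
                        FailureThreshold: 3,  // assumed (the Kubernetes default)
                    },
                }},
            },
        }
        _ = pod
    }
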
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.649523 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw"
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.669876 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s"]
Jan 28 12:23:56 crc kubenswrapper[4685]: E0128 12:23:56.670119 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d7a1a22-14a6-419e-b4f1-ebf636f248f9" containerName="route-controller-manager"
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.670134 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d7a1a22-14a6-419e-b4f1-ebf636f248f9" containerName="route-controller-manager"
Jan 28 12:23:56 crc kubenswrapper[4685]: E0128 12:23:56.670144 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edb4a604-6b03-48d4-b5ab-09e266b5eef8" containerName="controller-manager"
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.670149 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="edb4a604-6b03-48d4-b5ab-09e266b5eef8" containerName="controller-manager"
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.670244 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d7a1a22-14a6-419e-b4f1-ebf636f248f9" containerName="route-controller-manager"
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.670261 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="edb4a604-6b03-48d4-b5ab-09e266b5eef8" containerName="controller-manager"
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.670614 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s"
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.687598 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s"]
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.756194 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hd752\" (UniqueName: \"kubernetes.io/projected/edb4a604-6b03-48d4-b5ab-09e266b5eef8-kube-api-access-hd752\") pod \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\" (UID: \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\") "
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.756507 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edb4a604-6b03-48d4-b5ab-09e266b5eef8-config\") pod \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\" (UID: \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\") "
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.756607 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-serving-cert\") pod \"6d7a1a22-14a6-419e-b4f1-ebf636f248f9\" (UID: \"6d7a1a22-14a6-419e-b4f1-ebf636f248f9\") "
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.756664 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-config\") pod \"6d7a1a22-14a6-419e-b4f1-ebf636f248f9\" (UID: \"6d7a1a22-14a6-419e-b4f1-ebf636f248f9\") "
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.756697 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/edb4a604-6b03-48d4-b5ab-09e266b5eef8-client-ca\") pod \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\" (UID: \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\") " Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.756748 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/edb4a604-6b03-48d4-b5ab-09e266b5eef8-serving-cert\") pod \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\" (UID: \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\") " Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.757388 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v8p76\" (UniqueName: \"kubernetes.io/projected/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-kube-api-access-v8p76\") pod \"6d7a1a22-14a6-419e-b4f1-ebf636f248f9\" (UID: \"6d7a1a22-14a6-419e-b4f1-ebf636f248f9\") " Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.757400 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edb4a604-6b03-48d4-b5ab-09e266b5eef8-client-ca" (OuterVolumeSpecName: "client-ca") pod "edb4a604-6b03-48d4-b5ab-09e266b5eef8" (UID: "edb4a604-6b03-48d4-b5ab-09e266b5eef8"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.757437 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-client-ca\") pod \"6d7a1a22-14a6-419e-b4f1-ebf636f248f9\" (UID: \"6d7a1a22-14a6-419e-b4f1-ebf636f248f9\") " Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.757493 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/edb4a604-6b03-48d4-b5ab-09e266b5eef8-proxy-ca-bundles\") pod \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\" (UID: \"edb4a604-6b03-48d4-b5ab-09e266b5eef8\") " Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.757551 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-config" (OuterVolumeSpecName: "config") pod "6d7a1a22-14a6-419e-b4f1-ebf636f248f9" (UID: "6d7a1a22-14a6-419e-b4f1-ebf636f248f9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.757876 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/134521a2-eac8-4019-b5f0-847968893cb8-serving-cert\") pod \"route-controller-manager-856d844f48-4zg4s\" (UID: \"134521a2-eac8-4019-b5f0-847968893cb8\") " pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s" Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.758003 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-client-ca" (OuterVolumeSpecName: "client-ca") pod "6d7a1a22-14a6-419e-b4f1-ebf636f248f9" (UID: "6d7a1a22-14a6-419e-b4f1-ebf636f248f9"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.758002 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mz4bt\" (UniqueName: \"kubernetes.io/projected/134521a2-eac8-4019-b5f0-847968893cb8-kube-api-access-mz4bt\") pod \"route-controller-manager-856d844f48-4zg4s\" (UID: \"134521a2-eac8-4019-b5f0-847968893cb8\") " pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s" Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.758266 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/134521a2-eac8-4019-b5f0-847968893cb8-client-ca\") pod \"route-controller-manager-856d844f48-4zg4s\" (UID: \"134521a2-eac8-4019-b5f0-847968893cb8\") " pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s" Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.758334 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edb4a604-6b03-48d4-b5ab-09e266b5eef8-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "edb4a604-6b03-48d4-b5ab-09e266b5eef8" (UID: "edb4a604-6b03-48d4-b5ab-09e266b5eef8"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.758344 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/134521a2-eac8-4019-b5f0-847968893cb8-config\") pod \"route-controller-manager-856d844f48-4zg4s\" (UID: \"134521a2-eac8-4019-b5f0-847968893cb8\") " pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s" Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.758455 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edb4a604-6b03-48d4-b5ab-09e266b5eef8-config" (OuterVolumeSpecName: "config") pod "edb4a604-6b03-48d4-b5ab-09e266b5eef8" (UID: "edb4a604-6b03-48d4-b5ab-09e266b5eef8"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.758589 4685 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-client-ca\") on node \"crc\" DevicePath \"\""
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.758613 4685 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/edb4a604-6b03-48d4-b5ab-09e266b5eef8-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.758630 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edb4a604-6b03-48d4-b5ab-09e266b5eef8-config\") on node \"crc\" DevicePath \"\""
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.758643 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-config\") on node \"crc\" DevicePath \"\""
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.758656 4685 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/edb4a604-6b03-48d4-b5ab-09e266b5eef8-client-ca\") on node \"crc\" DevicePath \"\""
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.763664 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/edb4a604-6b03-48d4-b5ab-09e266b5eef8-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "edb4a604-6b03-48d4-b5ab-09e266b5eef8" (UID: "edb4a604-6b03-48d4-b5ab-09e266b5eef8"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.766879 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edb4a604-6b03-48d4-b5ab-09e266b5eef8-kube-api-access-hd752" (OuterVolumeSpecName: "kube-api-access-hd752") pod "edb4a604-6b03-48d4-b5ab-09e266b5eef8" (UID: "edb4a604-6b03-48d4-b5ab-09e266b5eef8"). InnerVolumeSpecName "kube-api-access-hd752". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.769495 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6d7a1a22-14a6-419e-b4f1-ebf636f248f9" (UID: "6d7a1a22-14a6-419e-b4f1-ebf636f248f9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
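
The reconciler_common.go and operation_generator.go entries around here are the kubelet volume manager's reconcile loop tearing down the deleted controller-manager and route-controller-manager pods while setting up their replacements: "UnmountVolume started" and "UnmountVolume.TearDown succeeded" drain the actual state for the old pods, "Volume detached ... DevicePath" confirms removal, and "MountVolume started" / "MountVolume.SetUp succeeded" build up the new pod's volumes. A deliberately simplified, self-contained sketch of that desired-state-versus-actual-state loop (toy types and names; this is not kubelet's actual implementation):

    package main

    import "fmt"

    // Toy model: volumes keyed by their UniqueName, e.g.
    // "kubernetes.io/configmap/134521a2-...-client-ca".
    func reconcile(desired, actual map[string]bool) {
        for name := range desired {
            if !actual[name] {
                fmt.Printf("MountVolume started for volume %q\n", name)
                actual[name] = true // stands in for the real SetUp operation
                fmt.Printf("MountVolume.SetUp succeeded for volume %q\n", name)
            }
        }
        for name := range actual {
            if !desired[name] {
                fmt.Printf("UnmountVolume.TearDown succeeded for volume %q\n", name)
                delete(actual, name)
                fmt.Printf("Volume detached for volume %q\n", name)
            }
        }
    }

    func main() {
        // New pod's volume appears in desired state; old pod's volume does not.
        desired := map[string]bool{"kubernetes.io/secret/134521a2-serving-cert": true}
        actual := map[string]bool{"kubernetes.io/secret/6d7a1a22-serving-cert": true}
        reconcile(desired, actual) // mounts the new volume, detaches the old one
    }
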
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.772902 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-kube-api-access-v8p76" (OuterVolumeSpecName: "kube-api-access-v8p76") pod "6d7a1a22-14a6-419e-b4f1-ebf636f248f9" (UID: "6d7a1a22-14a6-419e-b4f1-ebf636f248f9"). InnerVolumeSpecName "kube-api-access-v8p76". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.859665 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/134521a2-eac8-4019-b5f0-847968893cb8-serving-cert\") pod \"route-controller-manager-856d844f48-4zg4s\" (UID: \"134521a2-eac8-4019-b5f0-847968893cb8\") " pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s"
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.859809 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mz4bt\" (UniqueName: \"kubernetes.io/projected/134521a2-eac8-4019-b5f0-847968893cb8-kube-api-access-mz4bt\") pod \"route-controller-manager-856d844f48-4zg4s\" (UID: \"134521a2-eac8-4019-b5f0-847968893cb8\") " pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s"
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.859857 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/134521a2-eac8-4019-b5f0-847968893cb8-client-ca\") pod \"route-controller-manager-856d844f48-4zg4s\" (UID: \"134521a2-eac8-4019-b5f0-847968893cb8\") " pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s"
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.859906 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/134521a2-eac8-4019-b5f0-847968893cb8-config\") pod \"route-controller-manager-856d844f48-4zg4s\" (UID: \"134521a2-eac8-4019-b5f0-847968893cb8\") " pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s"
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.860026 4685 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.860047 4685 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/edb4a604-6b03-48d4-b5ab-09e266b5eef8-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.860065 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v8p76\" (UniqueName: \"kubernetes.io/projected/6d7a1a22-14a6-419e-b4f1-ebf636f248f9-kube-api-access-v8p76\") on node \"crc\" DevicePath \"\""
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.860085 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hd752\" (UniqueName: \"kubernetes.io/projected/edb4a604-6b03-48d4-b5ab-09e266b5eef8-kube-api-access-hd752\") on node \"crc\" DevicePath \"\""
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.879246 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/134521a2-eac8-4019-b5f0-847968893cb8-client-ca\") pod \"route-controller-manager-856d844f48-4zg4s\" (UID: \"134521a2-eac8-4019-b5f0-847968893cb8\") " pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s"
Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.879443 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/134521a2-eac8-4019-b5f0-847968893cb8-config\") pod 
\"route-controller-manager-856d844f48-4zg4s\" (UID: \"134521a2-eac8-4019-b5f0-847968893cb8\") " pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s" Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.879611 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/134521a2-eac8-4019-b5f0-847968893cb8-serving-cert\") pod \"route-controller-manager-856d844f48-4zg4s\" (UID: \"134521a2-eac8-4019-b5f0-847968893cb8\") " pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s" Jan 28 12:23:56 crc kubenswrapper[4685]: I0128 12:23:56.890090 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mz4bt\" (UniqueName: \"kubernetes.io/projected/134521a2-eac8-4019-b5f0-847968893cb8-kube-api-access-mz4bt\") pod \"route-controller-manager-856d844f48-4zg4s\" (UID: \"134521a2-eac8-4019-b5f0-847968893cb8\") " pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s" Jan 28 12:23:57 crc kubenswrapper[4685]: I0128 12:23:57.003036 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s" Jan 28 12:23:57 crc kubenswrapper[4685]: I0128 12:23:57.069574 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:23:57 crc kubenswrapper[4685]: I0128 12:23:57.069696 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:23:57 crc kubenswrapper[4685]: I0128 12:23:57.322274 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" event={"ID":"edb4a604-6b03-48d4-b5ab-09e266b5eef8","Type":"ContainerDied","Data":"282c7f186f9f420ee68b24b3cc1e4e502f363b0f9bd49adaec907300e8d49178"} Jan 28 12:23:57 crc kubenswrapper[4685]: I0128 12:23:57.322336 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-7pxnw" Jan 28 12:23:57 crc kubenswrapper[4685]: I0128 12:23:57.325840 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" event={"ID":"6d7a1a22-14a6-419e-b4f1-ebf636f248f9","Type":"ContainerDied","Data":"21fb185e8cadb0d17dc3976f09f12739b022a9d7cd4ec9c6c82073e7b80c3800"} Jan 28 12:23:57 crc kubenswrapper[4685]: I0128 12:23:57.325933 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r" Jan 28 12:23:57 crc kubenswrapper[4685]: I0128 12:23:57.372166 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r"] Jan 28 12:23:57 crc kubenswrapper[4685]: I0128 12:23:57.379370 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2966r"] Jan 28 12:23:57 crc kubenswrapper[4685]: I0128 12:23:57.383889 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7pxnw"] Jan 28 12:23:57 crc kubenswrapper[4685]: I0128 12:23:57.387748 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7pxnw"] Jan 28 12:23:58 crc kubenswrapper[4685]: I0128 12:23:58.010447 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 28 12:23:58 crc kubenswrapper[4685]: I0128 12:23:58.012703 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:23:58 crc kubenswrapper[4685]: I0128 12:23:58.022010 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 28 12:23:58 crc kubenswrapper[4685]: I0128 12:23:58.079121 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/8fd0033f-a806-4189-8e54-f83b1f72f37d-var-lock\") pod \"installer-9-crc\" (UID: \"8fd0033f-a806-4189-8e54-f83b1f72f37d\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:23:58 crc kubenswrapper[4685]: I0128 12:23:58.079201 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8fd0033f-a806-4189-8e54-f83b1f72f37d-kubelet-dir\") pod \"installer-9-crc\" (UID: \"8fd0033f-a806-4189-8e54-f83b1f72f37d\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:23:58 crc kubenswrapper[4685]: I0128 12:23:58.079230 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8fd0033f-a806-4189-8e54-f83b1f72f37d-kube-api-access\") pod \"installer-9-crc\" (UID: \"8fd0033f-a806-4189-8e54-f83b1f72f37d\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:23:58 crc kubenswrapper[4685]: I0128 12:23:58.179944 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/8fd0033f-a806-4189-8e54-f83b1f72f37d-var-lock\") pod \"installer-9-crc\" (UID: \"8fd0033f-a806-4189-8e54-f83b1f72f37d\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:23:58 crc kubenswrapper[4685]: I0128 12:23:58.180013 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8fd0033f-a806-4189-8e54-f83b1f72f37d-kubelet-dir\") pod \"installer-9-crc\" (UID: \"8fd0033f-a806-4189-8e54-f83b1f72f37d\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:23:58 crc kubenswrapper[4685]: I0128 12:23:58.180038 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/8fd0033f-a806-4189-8e54-f83b1f72f37d-kube-api-access\") pod \"installer-9-crc\" (UID: \"8fd0033f-a806-4189-8e54-f83b1f72f37d\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:23:58 crc kubenswrapper[4685]: I0128 12:23:58.180107 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/8fd0033f-a806-4189-8e54-f83b1f72f37d-var-lock\") pod \"installer-9-crc\" (UID: \"8fd0033f-a806-4189-8e54-f83b1f72f37d\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:23:58 crc kubenswrapper[4685]: I0128 12:23:58.180230 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8fd0033f-a806-4189-8e54-f83b1f72f37d-kubelet-dir\") pod \"installer-9-crc\" (UID: \"8fd0033f-a806-4189-8e54-f83b1f72f37d\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:23:58 crc kubenswrapper[4685]: I0128 12:23:58.205883 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8fd0033f-a806-4189-8e54-f83b1f72f37d-kube-api-access\") pod \"installer-9-crc\" (UID: \"8fd0033f-a806-4189-8e54-f83b1f72f37d\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:23:58 crc kubenswrapper[4685]: I0128 12:23:58.335376 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:23:58 crc kubenswrapper[4685]: I0128 12:23:58.557070 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d7a1a22-14a6-419e-b4f1-ebf636f248f9" path="/var/lib/kubelet/pods/6d7a1a22-14a6-419e-b4f1-ebf636f248f9/volumes" Jan 28 12:23:58 crc kubenswrapper[4685]: I0128 12:23:58.558315 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="edb4a604-6b03-48d4-b5ab-09e266b5eef8" path="/var/lib/kubelet/pods/edb4a604-6b03-48d4-b5ab-09e266b5eef8/volumes" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.200762 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-77bf78bfc6-kww85"] Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.202920 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.206308 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.206850 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.207243 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.207587 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.207831 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.212943 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.219023 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-77bf78bfc6-kww85"] Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.221987 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.296738 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e52fd026-03db-4c60-bb63-d2f20b43855a-client-ca\") pod \"controller-manager-77bf78bfc6-kww85\" (UID: \"e52fd026-03db-4c60-bb63-d2f20b43855a\") " pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.296825 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlwpg\" (UniqueName: \"kubernetes.io/projected/e52fd026-03db-4c60-bb63-d2f20b43855a-kube-api-access-nlwpg\") pod \"controller-manager-77bf78bfc6-kww85\" (UID: \"e52fd026-03db-4c60-bb63-d2f20b43855a\") " pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.296868 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e52fd026-03db-4c60-bb63-d2f20b43855a-config\") pod \"controller-manager-77bf78bfc6-kww85\" (UID: \"e52fd026-03db-4c60-bb63-d2f20b43855a\") " pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.296927 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e52fd026-03db-4c60-bb63-d2f20b43855a-proxy-ca-bundles\") pod \"controller-manager-77bf78bfc6-kww85\" (UID: \"e52fd026-03db-4c60-bb63-d2f20b43855a\") " pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.296986 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/e52fd026-03db-4c60-bb63-d2f20b43855a-serving-cert\") pod \"controller-manager-77bf78bfc6-kww85\" (UID: \"e52fd026-03db-4c60-bb63-d2f20b43855a\") " pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.398263 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e52fd026-03db-4c60-bb63-d2f20b43855a-serving-cert\") pod \"controller-manager-77bf78bfc6-kww85\" (UID: \"e52fd026-03db-4c60-bb63-d2f20b43855a\") " pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.398425 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e52fd026-03db-4c60-bb63-d2f20b43855a-client-ca\") pod \"controller-manager-77bf78bfc6-kww85\" (UID: \"e52fd026-03db-4c60-bb63-d2f20b43855a\") " pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.398480 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlwpg\" (UniqueName: \"kubernetes.io/projected/e52fd026-03db-4c60-bb63-d2f20b43855a-kube-api-access-nlwpg\") pod \"controller-manager-77bf78bfc6-kww85\" (UID: \"e52fd026-03db-4c60-bb63-d2f20b43855a\") " pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.398523 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e52fd026-03db-4c60-bb63-d2f20b43855a-config\") pod \"controller-manager-77bf78bfc6-kww85\" (UID: \"e52fd026-03db-4c60-bb63-d2f20b43855a\") " pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.398572 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e52fd026-03db-4c60-bb63-d2f20b43855a-proxy-ca-bundles\") pod \"controller-manager-77bf78bfc6-kww85\" (UID: \"e52fd026-03db-4c60-bb63-d2f20b43855a\") " pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.400329 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e52fd026-03db-4c60-bb63-d2f20b43855a-client-ca\") pod \"controller-manager-77bf78bfc6-kww85\" (UID: \"e52fd026-03db-4c60-bb63-d2f20b43855a\") " pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.401083 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e52fd026-03db-4c60-bb63-d2f20b43855a-proxy-ca-bundles\") pod \"controller-manager-77bf78bfc6-kww85\" (UID: \"e52fd026-03db-4c60-bb63-d2f20b43855a\") " pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.401630 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e52fd026-03db-4c60-bb63-d2f20b43855a-config\") pod \"controller-manager-77bf78bfc6-kww85\" (UID: \"e52fd026-03db-4c60-bb63-d2f20b43855a\") " 
pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.404994 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e52fd026-03db-4c60-bb63-d2f20b43855a-serving-cert\") pod \"controller-manager-77bf78bfc6-kww85\" (UID: \"e52fd026-03db-4c60-bb63-d2f20b43855a\") " pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.428706 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlwpg\" (UniqueName: \"kubernetes.io/projected/e52fd026-03db-4c60-bb63-d2f20b43855a-kube-api-access-nlwpg\") pod \"controller-manager-77bf78bfc6-kww85\" (UID: \"e52fd026-03db-4c60-bb63-d2f20b43855a\") " pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:23:59 crc kubenswrapper[4685]: I0128 12:23:59.534867 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:24:04 crc kubenswrapper[4685]: I0128 12:24:04.590225 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:24:04 crc kubenswrapper[4685]: I0128 12:24:04.590556 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:24:14 crc kubenswrapper[4685]: I0128 12:24:14.589970 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:24:14 crc kubenswrapper[4685]: I0128 12:24:14.590566 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:24:24 crc kubenswrapper[4685]: I0128 12:24:24.589732 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:24:24 crc kubenswrapper[4685]: I0128 12:24:24.590767 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:24:27 crc kubenswrapper[4685]: I0128 12:24:27.071306 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= 
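
Alongside the download-server readiness failures, the machine-config-daemon liveness probe against http://127.0.0.1:8798/health fails at 12:23:57, 12:24:27, and again at 12:24:57 below, i.e. every 30 seconds, and the restart that follows the third failure is issued with gracePeriod=600. That pattern is consistent with a probe like the sketch below, though the exact values are assumptions rather than facts established by this log:

    package main

    import (
        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
    )

    // Hypothetical liveness probe for machine-config-daemon. The explicit Host
    // is an assumption to match the 127.0.0.1 URL in the log, since a probe
    // without it would target the pod IP instead.
    func mcdLivenessProbe() *corev1.Probe {
        return &corev1.Probe{
            ProbeHandler: corev1.ProbeHandler{
                HTTPGet: &corev1.HTTPGetAction{Host: "127.0.0.1", Path: "/health", Port: intstr.FromInt(8798)},
            },
            PeriodSeconds:    30, // assumed from the 12:23:57 / 12:24:27 / 12:24:57 spacing
            FailureThreshold: 3,  // assumed: the restart follows the third logged failure
        }
    }

    func main() { _ = mcdLivenessProbe() }
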
Jan 28 12:24:27 crc kubenswrapper[4685]: I0128 12:24:27.071374 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:24:34 crc kubenswrapper[4685]: I0128 12:24:34.589394 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:24:34 crc kubenswrapper[4685]: I0128 12:24:34.589803 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:24:44 crc kubenswrapper[4685]: I0128 12:24:44.589846 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:24:44 crc kubenswrapper[4685]: I0128 12:24:44.590638 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:24:54 crc kubenswrapper[4685]: I0128 12:24:54.589588 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:24:54 crc kubenswrapper[4685]: I0128 12:24:54.590064 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:24:57 crc kubenswrapper[4685]: I0128 12:24:57.070279 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:24:57 crc kubenswrapper[4685]: I0128 12:24:57.070726 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:24:57 crc kubenswrapper[4685]: I0128 12:24:57.070810 4685 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:24:57 crc kubenswrapper[4685]: I0128 12:24:57.071774 4685 kuberuntime_manager.go:1027] "Message for Container 
of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9"} pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 12:24:57 crc kubenswrapper[4685]: I0128 12:24:57.071874 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" containerID="cri-o://022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9" gracePeriod=600 Jan 28 12:25:03 crc kubenswrapper[4685]: I0128 12:25:03.775823 4685 generic.go:334] "Generic (PLEG): container finished" podID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerID="022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9" exitCode=0 Jan 28 12:25:03 crc kubenswrapper[4685]: I0128 12:25:03.775890 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerDied","Data":"022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9"} Jan 28 12:25:04 crc kubenswrapper[4685]: I0128 12:25:04.589519 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:25:04 crc kubenswrapper[4685]: I0128 12:25:04.589622 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:25:14 crc kubenswrapper[4685]: I0128 12:25:14.589810 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:25:14 crc kubenswrapper[4685]: I0128 12:25:14.590512 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:25:24 crc kubenswrapper[4685]: I0128 12:25:24.589528 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:25:24 crc kubenswrapper[4685]: I0128 12:25:24.590339 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:25:33 crc kubenswrapper[4685]: E0128 12:25:32.205443 4685 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system 
image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 28 12:25:33 crc kubenswrapper[4685]: E0128 12:25:32.206237 4685 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jhtx5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-5zhxv_openshift-marketplace(6d558882-5a65-41ae-bcf0-d13c7cecc034): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 12:25:33 crc kubenswrapper[4685]: E0128 12:25:32.207494 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-5zhxv" podUID="6d558882-5a65-41ae-bcf0-d13c7cecc034" Jan 28 12:25:34 crc kubenswrapper[4685]: I0128 12:25:34.590017 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:25:34 crc kubenswrapper[4685]: I0128 12:25:34.590809 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:25:36 crc kubenswrapper[4685]: E0128 12:25:36.382346 4685 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 28 12:25:36 crc kubenswrapper[4685]: E0128 12:25:36.382727 4685 kuberuntime_manager.go:1274] "Unhandled Error" 
err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6bjln,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-wl9lt_openshift-marketplace(e68fe67b-133d-4474-8f6d-a781bca954d7): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 12:25:36 crc kubenswrapper[4685]: E0128 12:25:36.384078 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-wl9lt" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" Jan 28 12:25:37 crc kubenswrapper[4685]: E0128 12:25:37.598043 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-wl9lt" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" Jan 28 12:25:37 crc kubenswrapper[4685]: E0128 12:25:37.598259 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-5zhxv" podUID="6d558882-5a65-41ae-bcf0-d13c7cecc034" Jan 28 12:25:37 crc kubenswrapper[4685]: E0128 12:25:37.951749 4685 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 28 12:25:37 crc kubenswrapper[4685]: E0128 12:25:37.951913 4685 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8mqdw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-9q8q2_openshift-marketplace(8d93a170-5ad3-489b-b3be-7e3cc4201970): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 12:25:37 crc kubenswrapper[4685]: E0128 12:25:37.953137 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-9q8q2" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" Jan 28 12:25:39 crc kubenswrapper[4685]: E0128 12:25:39.157469 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-9q8q2" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" Jan 28 12:25:39 crc kubenswrapper[4685]: E0128 12:25:39.234387 4685 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 28 12:25:39 crc kubenswrapper[4685]: E0128 12:25:39.234619 4685 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zq6fj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-smscz_openshift-marketplace(e4bd8edd-83cd-4485-b7ad-b13d5aa53a01): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 12:25:39 crc kubenswrapper[4685]: E0128 12:25:39.236000 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-smscz" podUID="e4bd8edd-83cd-4485-b7ad-b13d5aa53a01" Jan 28 12:25:41 crc kubenswrapper[4685]: E0128 12:25:41.698232 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-smscz" podUID="e4bd8edd-83cd-4485-b7ad-b13d5aa53a01" Jan 28 12:25:41 crc kubenswrapper[4685]: E0128 12:25:41.789807 4685 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 28 12:25:41 crc kubenswrapper[4685]: E0128 12:25:41.790060 4685 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f8m5z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-924cb_openshift-marketplace(cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 12:25:41 crc kubenswrapper[4685]: E0128 12:25:41.791810 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-924cb" podUID="cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" Jan 28 12:25:41 crc kubenswrapper[4685]: E0128 12:25:41.795878 4685 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 28 12:25:41 crc kubenswrapper[4685]: E0128 12:25:41.796044 4685 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2rdrn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-8cf72_openshift-marketplace(424bdf45-4dcb-4b13-b68f-e55e115238bb): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 12:25:41 crc kubenswrapper[4685]: E0128 12:25:41.797543 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-8cf72" podUID="424bdf45-4dcb-4b13-b68f-e55e115238bb" Jan 28 12:25:41 crc kubenswrapper[4685]: I0128 12:25:41.903251 4685 scope.go:117] "RemoveContainer" containerID="e4554348b67369a8ade46f5a14ca7a0857f43281640faf12fe425e56d0d2f443" Jan 28 12:25:41 crc kubenswrapper[4685]: I0128 12:25:41.927836 4685 scope.go:117] "RemoveContainer" containerID="23e97a6f06efe0c2c8722fcc20556f395c12b349a256f9f59cd25aa343c8fd23" Jan 28 12:25:42 crc kubenswrapper[4685]: E0128 12:25:42.057352 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-8cf72" podUID="424bdf45-4dcb-4b13-b68f-e55e115238bb" Jan 28 12:25:42 crc kubenswrapper[4685]: E0128 12:25:42.057779 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-924cb" podUID="cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" Jan 28 12:25:42 crc kubenswrapper[4685]: W0128 12:25:42.366137 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-972e094d70fb421766ad969c7b4f321622a9bf467c5cb26e9eb5ec3f7d3033aa WatchSource:0}: Error finding container 972e094d70fb421766ad969c7b4f321622a9bf467c5cb26e9eb5ec3f7d3033aa: Status 404 returned error 
can't find the container with id 972e094d70fb421766ad969c7b4f321622a9bf467c5cb26e9eb5ec3f7d3033aa Jan 28 12:25:42 crc kubenswrapper[4685]: I0128 12:25:42.367070 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-5x4kp"] Jan 28 12:25:42 crc kubenswrapper[4685]: W0128 12:25:42.371028 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-9d5af3ad23abcb1c8a97e6b4c5e8d5b046940bb338e3aa9a1e6851ed35c3d930 WatchSource:0}: Error finding container 9d5af3ad23abcb1c8a97e6b4c5e8d5b046940bb338e3aa9a1e6851ed35c3d930: Status 404 returned error can't find the container with id 9d5af3ad23abcb1c8a97e6b4c5e8d5b046940bb338e3aa9a1e6851ed35c3d930 Jan 28 12:25:42 crc kubenswrapper[4685]: W0128 12:25:42.376105 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f0d7b7e_1577_4289_9043_ddf8dd9a48ef.slice/crio-01844967325e2e73f12451cb1d9652b7e1975cb20fd6e4cf1c252de68b459c26 WatchSource:0}: Error finding container 01844967325e2e73f12451cb1d9652b7e1975cb20fd6e4cf1c252de68b459c26: Status 404 returned error can't find the container with id 01844967325e2e73f12451cb1d9652b7e1975cb20fd6e4cf1c252de68b459c26 Jan 28 12:25:42 crc kubenswrapper[4685]: I0128 12:25:42.418723 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s"] Jan 28 12:25:42 crc kubenswrapper[4685]: I0128 12:25:42.428340 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 28 12:25:42 crc kubenswrapper[4685]: I0128 12:25:42.474084 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-77bf78bfc6-kww85"] Jan 28 12:25:42 crc kubenswrapper[4685]: I0128 12:25:42.480942 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 28 12:25:42 crc kubenswrapper[4685]: W0128 12:25:42.890270 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod134521a2_eac8_4019_b5f0_847968893cb8.slice/crio-86ca680e03b9dc580d24b3740656c20bbc77d9e411a4229b79433455845dc59c WatchSource:0}: Error finding container 86ca680e03b9dc580d24b3740656c20bbc77d9e411a4229b79433455845dc59c: Status 404 returned error can't find the container with id 86ca680e03b9dc580d24b3740656c20bbc77d9e411a4229b79433455845dc59c Jan 28 12:25:42 crc kubenswrapper[4685]: W0128 12:25:42.895686 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod8fd0033f_a806_4189_8e54_f83b1f72f37d.slice/crio-fd4dd6444216f5a758dbb3964f30c02ffc75bc75c5556016d9cd6354eac73a7e WatchSource:0}: Error finding container fd4dd6444216f5a758dbb3964f30c02ffc75bc75c5556016d9cd6354eac73a7e: Status 404 returned error can't find the container with id fd4dd6444216f5a758dbb3964f30c02ffc75bc75c5556016d9cd6354eac73a7e Jan 28 12:25:42 crc kubenswrapper[4685]: W0128 12:25:42.903062 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod589536ea_5968_4231_8569_231a05f2e7e5.slice/crio-77ea0ea69309e9199c9d57f2cb7f86a99607ab403ab154e981c6a006f7bc8c4d WatchSource:0}: Error finding container 77ea0ea69309e9199c9d57f2cb7f86a99607ab403ab154e981c6a006f7bc8c4d: Status 404 returned error can't find the container 
with id 77ea0ea69309e9199c9d57f2cb7f86a99607ab403ab154e981c6a006f7bc8c4d Jan 28 12:25:42 crc kubenswrapper[4685]: W0128 12:25:42.905889 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode52fd026_03db_4c60_bb63_d2f20b43855a.slice/crio-8c40e199b77aa8a120ae8a0561235bc0dbe4a9329942a19d467e0488864ca03f WatchSource:0}: Error finding container 8c40e199b77aa8a120ae8a0561235bc0dbe4a9329942a19d467e0488864ca03f: Status 404 returned error can't find the container with id 8c40e199b77aa8a120ae8a0561235bc0dbe4a9329942a19d467e0488864ca03f Jan 28 12:25:43 crc kubenswrapper[4685]: I0128 12:25:43.060929 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" event={"ID":"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef","Type":"ContainerStarted","Data":"01844967325e2e73f12451cb1d9652b7e1975cb20fd6e4cf1c252de68b459c26"} Jan 28 12:25:43 crc kubenswrapper[4685]: I0128 12:25:43.063156 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"8fd0033f-a806-4189-8e54-f83b1f72f37d","Type":"ContainerStarted","Data":"fd4dd6444216f5a758dbb3964f30c02ffc75bc75c5556016d9cd6354eac73a7e"} Jan 28 12:25:43 crc kubenswrapper[4685]: I0128 12:25:43.064758 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s" event={"ID":"134521a2-eac8-4019-b5f0-847968893cb8","Type":"ContainerStarted","Data":"86ca680e03b9dc580d24b3740656c20bbc77d9e411a4229b79433455845dc59c"} Jan 28 12:25:43 crc kubenswrapper[4685]: I0128 12:25:43.067564 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"589536ea-5968-4231-8569-231a05f2e7e5","Type":"ContainerStarted","Data":"77ea0ea69309e9199c9d57f2cb7f86a99607ab403ab154e981c6a006f7bc8c4d"} Jan 28 12:25:43 crc kubenswrapper[4685]: I0128 12:25:43.068684 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"972e094d70fb421766ad969c7b4f321622a9bf467c5cb26e9eb5ec3f7d3033aa"} Jan 28 12:25:43 crc kubenswrapper[4685]: I0128 12:25:43.069736 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" event={"ID":"e52fd026-03db-4c60-bb63-d2f20b43855a","Type":"ContainerStarted","Data":"8c40e199b77aa8a120ae8a0561235bc0dbe4a9329942a19d467e0488864ca03f"} Jan 28 12:25:43 crc kubenswrapper[4685]: I0128 12:25:43.070893 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"9d5af3ad23abcb1c8a97e6b4c5e8d5b046940bb338e3aa9a1e6851ed35c3d930"} Jan 28 12:25:43 crc kubenswrapper[4685]: I0128 12:25:43.072108 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"e1e161e4504aae2ca32ac13595ee9f64fb1561cf05015c17b6f3c157204b7a7b"} Jan 28 12:25:44 crc kubenswrapper[4685]: I0128 12:25:44.094341 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-54gh9" 
event={"ID":"79a36fbf-eb6c-442f-b6f0-d4a5f7435dde","Type":"ContainerStarted","Data":"20cbe911d1faf3d9abb61f91dbfb654d7bf6cc465854ec3772df14e4be8599ae"} Jan 28 12:25:44 crc kubenswrapper[4685]: I0128 12:25:44.590158 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:25:44 crc kubenswrapper[4685]: I0128 12:25:44.590270 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:25:45 crc kubenswrapper[4685]: E0128 12:25:45.576635 4685 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 28 12:25:45 crc kubenswrapper[4685]: E0128 12:25:45.577010 4685 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-glfkg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-ntg69_openshift-marketplace(7d2d54a9-9f8c-4158-8551-c4351bca8c19): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 12:25:45 crc kubenswrapper[4685]: E0128 12:25:45.578589 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-ntg69" podUID="7d2d54a9-9f8c-4158-8551-c4351bca8c19" Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 12:25:46.105777 4685 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"8fd0033f-a806-4189-8e54-f83b1f72f37d","Type":"ContainerStarted","Data":"2903e56e8a72307aece2742386438ef794a17c70ae48b8564a3c307f249032c0"} Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 12:25:46.106858 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s" event={"ID":"134521a2-eac8-4019-b5f0-847968893cb8","Type":"ContainerStarted","Data":"b4caa62c8fe922578a05535950936738156dd348895c064399e997bad3ff2703"} Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 12:25:46.107053 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s" Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 12:25:46.109502 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerStarted","Data":"a525aa42b2fedd64e12200b250d522770d76d5b9f7da6fd1b15cf0f353da0c9d"} Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 12:25:46.110591 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" event={"ID":"e52fd026-03db-4c60-bb63-d2f20b43855a","Type":"ContainerStarted","Data":"f188e910ef1a7c3fb8f5a964ffc658021c16719873f1f6b0912c81b1cb8dc234"} Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 12:25:46.110981 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 12:25:46.111629 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"3b92daf794b2bbb19880047ff9dd303a39663e31c124f058b89122e23704d2b5"} Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 12:25:46.113196 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"d063c51f13e033d82369a0e3d2eb0adb21c2e146271dbed65e5d0a908f233682"} Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 12:25:46.113326 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 12:25:46.114254 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" event={"ID":"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef","Type":"ContainerStarted","Data":"c0cfdffb03eca1593d0d5cced19e46018ef030cf62342abc35e0ad547fd23bc2"} Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 12:25:46.115525 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 12:25:46.115812 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"589536ea-5968-4231-8569-231a05f2e7e5","Type":"ContainerStarted","Data":"e4c72a2bb9c06cab2e4f131eaa9fa379711b45f9b2f3b858839a4b934dfffaae"} Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 12:25:46.117913 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"aecd4086d734a9a5ce19d1b9358d0d1fb5ad160507471bf2237d516a100583f9"} Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 12:25:46.117952 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-54gh9" Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 12:25:46.118126 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 12:25:46.118195 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 12:25:46.122568 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=108.122548682 podStartE2EDuration="1m48.122548682s" podCreationTimestamp="2026-01-28 12:23:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:25:46.120212126 +0000 UTC m=+297.207625961" watchObservedRunningTime="2026-01-28 12:25:46.122548682 +0000 UTC m=+297.209962527" Jan 28 12:25:46 crc kubenswrapper[4685]: E0128 12:25:46.125730 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-ntg69" podUID="7d2d54a9-9f8c-4158-8551-c4351bca8c19" Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 12:25:46.155865 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s" podStartSLOduration=117.155841446 podStartE2EDuration="1m57.155841446s" podCreationTimestamp="2026-01-28 12:23:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:25:46.154630812 +0000 UTC m=+297.242044657" watchObservedRunningTime="2026-01-28 12:25:46.155841446 +0000 UTC m=+297.243255291" Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 12:25:46.205620 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s" Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 12:25:46.266054 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" podStartSLOduration=117.266035041 podStartE2EDuration="1m57.266035041s" podCreationTimestamp="2026-01-28 12:23:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:25:46.260023852 +0000 UTC m=+297.347437687" watchObservedRunningTime="2026-01-28 12:25:46.266035041 +0000 UTC m=+297.353448886" Jan 28 12:25:46 crc kubenswrapper[4685]: I0128 
12:25:46.317589 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=113.317574238 podStartE2EDuration="1m53.317574238s" podCreationTimestamp="2026-01-28 12:23:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:25:46.293573834 +0000 UTC m=+297.380987669" watchObservedRunningTime="2026-01-28 12:25:46.317574238 +0000 UTC m=+297.404988073" Jan 28 12:25:46 crc kubenswrapper[4685]: E0128 12:25:46.570387 4685 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 28 12:25:46 crc kubenswrapper[4685]: E0128 12:25:46.570774 4685 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gfjmg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-gvfhc_openshift-marketplace(94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 12:25:46 crc kubenswrapper[4685]: E0128 12:25:46.572042 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-gvfhc" podUID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" Jan 28 12:25:47 crc kubenswrapper[4685]: I0128 12:25:47.156329 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-5x4kp" event={"ID":"5f0d7b7e-1577-4289-9043-ddf8dd9a48ef","Type":"ContainerStarted","Data":"451a9d2fbad865fee29ddfb5fb10cba6be0ae0a108de43dbd7fe360f9fbe0ce2"} Jan 28 12:25:47 crc kubenswrapper[4685]: I0128 12:25:47.158623 4685 
generic.go:334] "Generic (PLEG): container finished" podID="589536ea-5968-4231-8569-231a05f2e7e5" containerID="e4c72a2bb9c06cab2e4f131eaa9fa379711b45f9b2f3b858839a4b934dfffaae" exitCode=0 Jan 28 12:25:47 crc kubenswrapper[4685]: I0128 12:25:47.158712 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"589536ea-5968-4231-8569-231a05f2e7e5","Type":"ContainerDied","Data":"e4c72a2bb9c06cab2e4f131eaa9fa379711b45f9b2f3b858839a4b934dfffaae"} Jan 28 12:25:47 crc kubenswrapper[4685]: I0128 12:25:47.159777 4685 patch_prober.go:28] interesting pod/downloads-7954f5f757-54gh9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" start-of-body= Jan 28 12:25:47 crc kubenswrapper[4685]: I0128 12:25:47.159832 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-54gh9" podUID="79a36fbf-eb6c-442f-b6f0-d4a5f7435dde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.7:8080/\": dial tcp 10.217.0.7:8080: connect: connection refused" Jan 28 12:25:47 crc kubenswrapper[4685]: E0128 12:25:47.161828 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-gvfhc" podUID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" Jan 28 12:25:48 crc kubenswrapper[4685]: I0128 12:25:48.191799 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-5x4kp" podStartSLOduration=264.191770145 podStartE2EDuration="4m24.191770145s" podCreationTimestamp="2026-01-28 12:21:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:25:48.190377146 +0000 UTC m=+299.277791021" watchObservedRunningTime="2026-01-28 12:25:48.191770145 +0000 UTC m=+299.279184010" Jan 28 12:25:48 crc kubenswrapper[4685]: I0128 12:25:48.532020 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 12:25:48 crc kubenswrapper[4685]: I0128 12:25:48.721944 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/589536ea-5968-4231-8569-231a05f2e7e5-kubelet-dir\") pod \"589536ea-5968-4231-8569-231a05f2e7e5\" (UID: \"589536ea-5968-4231-8569-231a05f2e7e5\") " Jan 28 12:25:48 crc kubenswrapper[4685]: I0128 12:25:48.722136 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/589536ea-5968-4231-8569-231a05f2e7e5-kube-api-access\") pod \"589536ea-5968-4231-8569-231a05f2e7e5\" (UID: \"589536ea-5968-4231-8569-231a05f2e7e5\") " Jan 28 12:25:48 crc kubenswrapper[4685]: I0128 12:25:48.722432 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/589536ea-5968-4231-8569-231a05f2e7e5-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "589536ea-5968-4231-8569-231a05f2e7e5" (UID: "589536ea-5968-4231-8569-231a05f2e7e5"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:25:48 crc kubenswrapper[4685]: I0128 12:25:48.722595 4685 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/589536ea-5968-4231-8569-231a05f2e7e5-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 28 12:25:48 crc kubenswrapper[4685]: I0128 12:25:48.728878 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/589536ea-5968-4231-8569-231a05f2e7e5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "589536ea-5968-4231-8569-231a05f2e7e5" (UID: "589536ea-5968-4231-8569-231a05f2e7e5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:25:48 crc kubenswrapper[4685]: I0128 12:25:48.823704 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/589536ea-5968-4231-8569-231a05f2e7e5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 12:25:49 crc kubenswrapper[4685]: I0128 12:25:49.173920 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"589536ea-5968-4231-8569-231a05f2e7e5","Type":"ContainerDied","Data":"77ea0ea69309e9199c9d57f2cb7f86a99607ab403ab154e981c6a006f7bc8c4d"} Jan 28 12:25:49 crc kubenswrapper[4685]: I0128 12:25:49.173983 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="77ea0ea69309e9199c9d57f2cb7f86a99607ab403ab154e981c6a006f7bc8c4d" Jan 28 12:25:49 crc kubenswrapper[4685]: I0128 12:25:49.174012 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 12:25:50 crc kubenswrapper[4685]: I0128 12:25:50.258558 4685 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 28 12:25:54 crc kubenswrapper[4685]: I0128 12:25:54.598400 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-54gh9" Jan 28 12:26:21 crc kubenswrapper[4685]: I0128 12:26:21.639380 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.329351 4685 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 28 12:26:23 crc kubenswrapper[4685]: E0128 12:26:23.329650 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="589536ea-5968-4231-8569-231a05f2e7e5" containerName="pruner" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.329668 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="589536ea-5968-4231-8569-231a05f2e7e5" containerName="pruner" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.329844 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="589536ea-5968-4231-8569-231a05f2e7e5" containerName="pruner" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.330402 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.341880 4685 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.342544 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991" gracePeriod=15 Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.342568 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d" gracePeriod=15 Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.342585 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339" gracePeriod=15 Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.342643 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e" gracePeriod=15 Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.342599 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e" gracePeriod=15 Jan 28 12:26:23 crc kubenswrapper[4685]: E0128 12:26:23.343366 4685 file.go:109] "Unable to process watch event" err="can't process config file \"/etc/kubernetes/manifests/kube-apiserver-pod.yaml\": /etc/kubernetes/manifests/kube-apiserver-pod.yaml: couldn't parse as pod(Object 'Kind' is missing in 'null'), please check config file" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.345609 4685 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 28 12:26:23 crc kubenswrapper[4685]: E0128 12:26:23.345822 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.345832 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 28 12:26:23 crc kubenswrapper[4685]: E0128 12:26:23.345842 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.345848 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 12:26:23 crc 
kubenswrapper[4685]: E0128 12:26:23.345858 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.345864 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 28 12:26:23 crc kubenswrapper[4685]: E0128 12:26:23.345871 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.345876 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 28 12:26:23 crc kubenswrapper[4685]: E0128 12:26:23.345890 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.345896 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 28 12:26:23 crc kubenswrapper[4685]: E0128 12:26:23.345905 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.345911 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 28 12:26:23 crc kubenswrapper[4685]: E0128 12:26:23.345918 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.345923 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 12:26:23 crc kubenswrapper[4685]: E0128 12:26:23.345929 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.345934 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.346018 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.346028 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.346036 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.346046 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.346051 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.346061 4685 
memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.346068 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.373524 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.470804 4685 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.470862 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.501742 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.501852 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.501886 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.501930 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.501966 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.502042 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.502097 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.502203 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.603976 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.604080 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.604085 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.604121 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.604161 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.604165 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.604226 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.604258 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.604286 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.604307 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.604322 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.604410 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.604471 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.604550 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.604583 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.604497 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod 
\"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:26:23 crc kubenswrapper[4685]: I0128 12:26:23.670971 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:26:24 crc kubenswrapper[4685]: I0128 12:26:24.285648 4685 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:6443/readyz\": dial tcp 192.168.126.11:6443: connect: connection refused" start-of-body= Jan 28 12:26:24 crc kubenswrapper[4685]: I0128 12:26:24.286043 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/readyz\": dial tcp 192.168.126.11:6443: connect: connection refused" Jan 28 12:26:25 crc kubenswrapper[4685]: I0128 12:26:25.406100 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 28 12:26:25 crc kubenswrapper[4685]: I0128 12:26:25.409012 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 12:26:25 crc kubenswrapper[4685]: I0128 12:26:25.409995 4685 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e" exitCode=2 Jan 28 12:26:26 crc kubenswrapper[4685]: E0128 12:26:26.120537 4685 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:26:26 crc kubenswrapper[4685]: E0128 12:26:26.121615 4685 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:26:26 crc kubenswrapper[4685]: E0128 12:26:26.122201 4685 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:26:26 crc kubenswrapper[4685]: E0128 12:26:26.122731 4685 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:26:26 crc kubenswrapper[4685]: E0128 12:26:26.123305 4685 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:26:26 crc kubenswrapper[4685]: I0128 12:26:26.123367 4685 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 28 12:26:26 crc kubenswrapper[4685]: E0128 12:26:26.123899 4685 
controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="200ms" Jan 28 12:26:26 crc kubenswrapper[4685]: E0128 12:26:26.325828 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="400ms" Jan 28 12:26:26 crc kubenswrapper[4685]: E0128 12:26:26.726440 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="800ms" Jan 28 12:26:27 crc kubenswrapper[4685]: I0128 12:26:27.425337 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 28 12:26:27 crc kubenswrapper[4685]: I0128 12:26:27.427880 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 12:26:27 crc kubenswrapper[4685]: I0128 12:26:27.428878 4685 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e" exitCode=0 Jan 28 12:26:27 crc kubenswrapper[4685]: E0128 12:26:27.527906 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="1.6s" Jan 28 12:26:29 crc kubenswrapper[4685]: E0128 12:26:29.129356 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="3.2s" Jan 28 12:26:29 crc kubenswrapper[4685]: I0128 12:26:29.447351 4685 generic.go:334] "Generic (PLEG): container finished" podID="8fd0033f-a806-4189-8e54-f83b1f72f37d" containerID="2903e56e8a72307aece2742386438ef794a17c70ae48b8564a3c307f249032c0" exitCode=0 Jan 28 12:26:29 crc kubenswrapper[4685]: I0128 12:26:29.447509 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"8fd0033f-a806-4189-8e54-f83b1f72f37d","Type":"ContainerDied","Data":"2903e56e8a72307aece2742386438ef794a17c70ae48b8564a3c307f249032c0"} Jan 28 12:26:29 crc kubenswrapper[4685]: I0128 12:26:29.448757 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:26:29 crc kubenswrapper[4685]: I0128 12:26:29.449252 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:26:29 crc kubenswrapper[4685]: I0128 12:26:29.450729 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 28 12:26:29 crc kubenswrapper[4685]: I0128 12:26:29.452418 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 12:26:29 crc kubenswrapper[4685]: I0128 12:26:29.453602 4685 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d" exitCode=0 Jan 28 12:26:29 crc kubenswrapper[4685]: I0128 12:26:29.453643 4685 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339" exitCode=0 Jan 28 12:26:29 crc kubenswrapper[4685]: I0128 12:26:29.453681 4685 scope.go:117] "RemoveContainer" containerID="54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311" Jan 28 12:26:30 crc kubenswrapper[4685]: I0128 12:26:30.547885 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:26:30 crc kubenswrapper[4685]: I0128 12:26:30.548455 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:26:31 crc kubenswrapper[4685]: I0128 12:26:31.471116 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 12:26:31 crc kubenswrapper[4685]: I0128 12:26:31.472355 4685 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991" exitCode=0 Jan 28 12:26:32 crc kubenswrapper[4685]: E0128 12:26:32.330918 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="6.4s" Jan 28 12:26:36 crc kubenswrapper[4685]: I0128 12:26:36.763943 4685 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Readiness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 28 12:26:36 crc kubenswrapper[4685]: I0128 12:26:36.764272 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" 
containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 28 12:26:36 crc kubenswrapper[4685]: E0128 12:26:36.765447 4685 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/events\": dial tcp 38.102.83.175:6443: connect: connection refused" event=< Jan 28 12:26:36 crc kubenswrapper[4685]: &Event{ObjectMeta:{kube-controller-manager-crc.188ee4bb50876286 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:ProbeError,Message:Readiness probe error: Get "https://192.168.126.11:10257/healthz": dial tcp 192.168.126.11:10257: connect: connection refused Jan 28 12:26:36 crc kubenswrapper[4685]: body: Jan 28 12:26:36 crc kubenswrapper[4685]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 12:26:36.764250758 +0000 UTC m=+347.851664603,LastTimestamp:2026-01-28 12:26:36.764250758 +0000 UTC m=+347.851664603,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 28 12:26:36 crc kubenswrapper[4685]: > Jan 28 12:26:38 crc kubenswrapper[4685]: I0128 12:26:38.518551 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 28 12:26:38 crc kubenswrapper[4685]: I0128 12:26:38.518638 4685 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07" exitCode=1 Jan 28 12:26:38 crc kubenswrapper[4685]: I0128 12:26:38.518684 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07"} Jan 28 12:26:38 crc kubenswrapper[4685]: I0128 12:26:38.519309 4685 scope.go:117] "RemoveContainer" containerID="84b5b698f04a4ce2b037c151d53e4dd78c71892d3748238c5e9ca5bee413dd07" Jan 28 12:26:38 crc kubenswrapper[4685]: I0128 12:26:38.520374 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:26:38 crc kubenswrapper[4685]: I0128 12:26:38.521048 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:26:38 crc kubenswrapper[4685]: I0128 12:26:38.522137 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:26:38 crc kubenswrapper[4685]: E0128 12:26:38.732307 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="7s" Jan 28 12:26:40 crc kubenswrapper[4685]: I0128 12:26:40.557967 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:26:40 crc kubenswrapper[4685]: I0128 12:26:40.558736 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:26:40 crc kubenswrapper[4685]: I0128 12:26:40.559634 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:26:43 crc kubenswrapper[4685]: I0128 12:26:43.449269 4685 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:26:44 crc kubenswrapper[4685]: E0128 12:26:44.244683 4685 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/events\": dial tcp 38.102.83.175:6443: connect: connection refused" event=< Jan 28 12:26:44 crc kubenswrapper[4685]: &Event{ObjectMeta:{kube-controller-manager-crc.188ee4bb50876286 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:ProbeError,Message:Readiness probe error: Get "https://192.168.126.11:10257/healthz": dial tcp 192.168.126.11:10257: connect: connection refused Jan 28 12:26:44 crc kubenswrapper[4685]: body: Jan 28 12:26:44 crc kubenswrapper[4685]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 12:26:36.764250758 +0000 UTC m=+347.851664603,LastTimestamp:2026-01-28 12:26:36.764250758 +0000 UTC m=+347.851664603,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 28 12:26:44 crc kubenswrapper[4685]: > Jan 28 12:26:45 crc kubenswrapper[4685]: E0128 12:26:45.733940 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="7s" Jan 28 12:26:46 crc kubenswrapper[4685]: I0128 12:26:46.763254 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:26:47 crc kubenswrapper[4685]: I0128 12:26:47.901944 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:26:50 crc kubenswrapper[4685]: I0128 12:26:50.550276 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:26:50 crc kubenswrapper[4685]: I0128 12:26:50.551388 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:26:50 crc kubenswrapper[4685]: I0128 12:26:50.551985 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:26:52 crc kubenswrapper[4685]: E0128 12:26:52.735719 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="7s" Jan 28 12:26:54 crc kubenswrapper[4685]: E0128 12:26:54.246460 4685 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/events\": dial tcp 38.102.83.175:6443: connect: connection refused" event=< Jan 28 12:26:54 crc kubenswrapper[4685]: &Event{ObjectMeta:{kube-controller-manager-crc.188ee4bb50876286 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:ProbeError,Message:Readiness probe error: Get "https://192.168.126.11:10257/healthz": dial tcp 192.168.126.11:10257: connect: connection refused Jan 28 12:26:54 crc kubenswrapper[4685]: body: Jan 28 12:26:54 crc kubenswrapper[4685]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 12:26:36.764250758 +0000 UTC m=+347.851664603,LastTimestamp:2026-01-28 12:26:36.764250758 +0000 UTC m=+347.851664603,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 28 12:26:54 crc kubenswrapper[4685]: > Jan 28 12:26:59 crc kubenswrapper[4685]: E0128 
12:26:59.737079 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="7s" Jan 28 12:27:00 crc kubenswrapper[4685]: I0128 12:27:00.551076 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:00 crc kubenswrapper[4685]: I0128 12:27:00.551818 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:00 crc kubenswrapper[4685]: I0128 12:27:00.552461 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:04 crc kubenswrapper[4685]: E0128 12:27:04.247688 4685 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/events\": dial tcp 38.102.83.175:6443: connect: connection refused" event=< Jan 28 12:27:04 crc kubenswrapper[4685]: &Event{ObjectMeta:{kube-controller-manager-crc.188ee4bb50876286 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:ProbeError,Message:Readiness probe error: Get "https://192.168.126.11:10257/healthz": dial tcp 192.168.126.11:10257: connect: connection refused Jan 28 12:27:04 crc kubenswrapper[4685]: body: Jan 28 12:27:04 crc kubenswrapper[4685]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 12:26:36.764250758 +0000 UTC m=+347.851664603,LastTimestamp:2026-01-28 12:26:36.764250758 +0000 UTC m=+347.851664603,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 28 12:27:04 crc kubenswrapper[4685]: > Jan 28 12:27:05 crc kubenswrapper[4685]: E0128 12:27:05.619819 4685 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.175:6443: connect: connection refused" pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" volumeName="registry-storage" Jan 28 12:27:06 crc kubenswrapper[4685]: E0128 12:27:06.738686 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="7s" Jan 28 12:27:09 crc kubenswrapper[4685]: I0128 12:27:09.757844 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-network-node-identity_network-node-identity-vrzqb_ef543e1b-8068-4ea3-b32a-61027b32e95d/approver/0.log" Jan 28 12:27:09 crc kubenswrapper[4685]: I0128 12:27:09.760730 4685 generic.go:334] "Generic (PLEG): container finished" podID="ef543e1b-8068-4ea3-b32a-61027b32e95d" containerID="4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03" exitCode=1 Jan 28 12:27:09 crc kubenswrapper[4685]: I0128 12:27:09.760811 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerDied","Data":"4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03"} Jan 28 12:27:09 crc kubenswrapper[4685]: I0128 12:27:09.761913 4685 scope.go:117] "RemoveContainer" containerID="4c6fd0a7fe3d8d7b7bcdcd293b9fdf8401d8b6dca7a4cc390d6ea379aa3dbb03" Jan 28 12:27:09 crc kubenswrapper[4685]: I0128 12:27:09.762666 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:09 crc kubenswrapper[4685]: I0128 12:27:09.763123 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:09 crc kubenswrapper[4685]: I0128 12:27:09.763596 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:09 crc kubenswrapper[4685]: I0128 12:27:09.764344 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:10 crc kubenswrapper[4685]: I0128 12:27:10.554103 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:10 crc kubenswrapper[4685]: I0128 12:27:10.554653 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:10 crc kubenswrapper[4685]: I0128 12:27:10.555224 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:10 crc kubenswrapper[4685]: I0128 12:27:10.556040 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:13 crc kubenswrapper[4685]: E0128 12:27:13.739855 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="7s" Jan 28 12:27:14 crc kubenswrapper[4685]: E0128 12:27:14.249325 4685 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/events\": dial tcp 38.102.83.175:6443: connect: connection refused" event=< Jan 28 12:27:14 crc kubenswrapper[4685]: &Event{ObjectMeta:{kube-controller-manager-crc.188ee4bb50876286 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:ProbeError,Message:Readiness probe error: Get "https://192.168.126.11:10257/healthz": dial tcp 192.168.126.11:10257: connect: connection refused Jan 28 12:27:14 crc kubenswrapper[4685]: body: Jan 28 12:27:14 crc kubenswrapper[4685]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 12:26:36.764250758 +0000 UTC m=+347.851664603,LastTimestamp:2026-01-28 12:26:36.764250758 +0000 UTC m=+347.851664603,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 28 12:27:14 crc kubenswrapper[4685]: > Jan 28 12:27:20 crc kubenswrapper[4685]: I0128 12:27:20.552096 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:20 crc kubenswrapper[4685]: I0128 12:27:20.553143 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:20 crc kubenswrapper[4685]: I0128 12:27:20.553719 
4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:20 crc kubenswrapper[4685]: I0128 12:27:20.554263 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:20 crc kubenswrapper[4685]: E0128 12:27:20.741290 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="7s" Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.117261 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.120002 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.120745 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.121655 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.122271 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.194129 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8fd0033f-a806-4189-8e54-f83b1f72f37d-kubelet-dir\") pod \"8fd0033f-a806-4189-8e54-f83b1f72f37d\" (UID: \"8fd0033f-a806-4189-8e54-f83b1f72f37d\") " Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.194361 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fd0033f-a806-4189-8e54-f83b1f72f37d-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod 
"8fd0033f-a806-4189-8e54-f83b1f72f37d" (UID: "8fd0033f-a806-4189-8e54-f83b1f72f37d"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.194779 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8fd0033f-a806-4189-8e54-f83b1f72f37d-kube-api-access\") pod \"8fd0033f-a806-4189-8e54-f83b1f72f37d\" (UID: \"8fd0033f-a806-4189-8e54-f83b1f72f37d\") " Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.195139 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/8fd0033f-a806-4189-8e54-f83b1f72f37d-var-lock\") pod \"8fd0033f-a806-4189-8e54-f83b1f72f37d\" (UID: \"8fd0033f-a806-4189-8e54-f83b1f72f37d\") " Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.195294 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fd0033f-a806-4189-8e54-f83b1f72f37d-var-lock" (OuterVolumeSpecName: "var-lock") pod "8fd0033f-a806-4189-8e54-f83b1f72f37d" (UID: "8fd0033f-a806-4189-8e54-f83b1f72f37d"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.195860 4685 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/8fd0033f-a806-4189-8e54-f83b1f72f37d-var-lock\") on node \"crc\" DevicePath \"\"" Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.195890 4685 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8fd0033f-a806-4189-8e54-f83b1f72f37d-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.204279 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fd0033f-a806-4189-8e54-f83b1f72f37d-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "8fd0033f-a806-4189-8e54-f83b1f72f37d" (UID: "8fd0033f-a806-4189-8e54-f83b1f72f37d"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.296648 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8fd0033f-a806-4189-8e54-f83b1f72f37d-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.858747 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"8fd0033f-a806-4189-8e54-f83b1f72f37d","Type":"ContainerDied","Data":"fd4dd6444216f5a758dbb3964f30c02ffc75bc75c5556016d9cd6354eac73a7e"} Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.858811 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fd4dd6444216f5a758dbb3964f30c02ffc75bc75c5556016d9cd6354eac73a7e" Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.858854 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.865675 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.866411 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.866863 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:22 crc kubenswrapper[4685]: I0128 12:27:22.867416 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:24 crc kubenswrapper[4685]: E0128 12:27:24.250971 4685 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/events\": dial tcp 38.102.83.175:6443: connect: connection refused" event=< Jan 28 12:27:24 crc kubenswrapper[4685]: &Event{ObjectMeta:{kube-controller-manager-crc.188ee4bb50876286 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:ProbeError,Message:Readiness probe error: Get "https://192.168.126.11:10257/healthz": dial tcp 192.168.126.11:10257: connect: connection refused Jan 28 12:27:24 crc kubenswrapper[4685]: body: Jan 28 12:27:24 crc kubenswrapper[4685]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 12:26:36.764250758 +0000 UTC m=+347.851664603,LastTimestamp:2026-01-28 12:26:36.764250758 +0000 UTC m=+347.851664603,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 28 12:27:24 crc kubenswrapper[4685]: > Jan 28 12:27:27 crc kubenswrapper[4685]: E0128 12:27:27.742408 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="7s" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.409404 4685 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.411014 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.412236 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.413329 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.413801 4685 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.414209 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.414708 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.612038 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.612129 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.612268 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.612613 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.612790 4685 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.612835 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.612870 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.714707 4685 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.715033 4685 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.907821 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.909813 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.911296 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.912204 4685 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.913015 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.913569 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.914047 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.934281 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.934879 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.935398 4685 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.935827 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" 
err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:28 crc kubenswrapper[4685]: I0128 12:27:28.936296 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:30 crc kubenswrapper[4685]: I0128 12:27:30.550274 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:30 crc kubenswrapper[4685]: I0128 12:27:30.550960 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:30 crc kubenswrapper[4685]: I0128 12:27:30.551589 4685 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:30 crc kubenswrapper[4685]: I0128 12:27:30.552067 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:30 crc kubenswrapper[4685]: I0128 12:27:30.552839 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:30 crc kubenswrapper[4685]: I0128 12:27:30.557531 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Jan 28 12:27:34 crc kubenswrapper[4685]: E0128 12:27:34.253752 4685 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/events\": dial tcp 38.102.83.175:6443: connect: connection refused" event=< Jan 28 12:27:34 crc kubenswrapper[4685]: &Event{ObjectMeta:{kube-controller-manager-crc.188ee4bb50876286 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:ProbeError,Message:Readiness probe error: Get "https://192.168.126.11:10257/healthz": dial tcp 192.168.126.11:10257: connect: connection refused Jan 28 12:27:34 crc kubenswrapper[4685]: body: Jan 28 12:27:34 crc kubenswrapper[4685]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 12:26:36.764250758 +0000 UTC m=+347.851664603,LastTimestamp:2026-01-28 12:26:36.764250758 +0000 UTC m=+347.851664603,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 28 12:27:34 crc kubenswrapper[4685]: > Jan 28 12:27:34 crc kubenswrapper[4685]: E0128 12:27:34.743394 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="7s" Jan 28 12:27:34 crc kubenswrapper[4685]: I0128 12:27:34.817504 4685 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-qpc29 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body= Jan 28 12:27:34 crc kubenswrapper[4685]: I0128 12:27:34.817568 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" Jan 28 12:27:34 crc kubenswrapper[4685]: I0128 12:27:34.817504 4685 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-qpc29 container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body= Jan 28 12:27:34 crc kubenswrapper[4685]: I0128 12:27:34.817628 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" Jan 28 12:27:37 crc kubenswrapper[4685]: I0128 12:27:37.552375 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:27:37 crc kubenswrapper[4685]: I0128 12:27:37.554820 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:37 crc kubenswrapper[4685]: I0128 12:27:37.555409 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:37 crc kubenswrapper[4685]: I0128 12:27:37.555927 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:37 crc kubenswrapper[4685]: I0128 12:27:37.556465 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:37 crc kubenswrapper[4685]: I0128 12:27:37.610138 4685 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="58bc04bb-fcb0-42e3-8225-369bc0272d41" Jan 28 12:27:37 crc kubenswrapper[4685]: I0128 12:27:37.610196 4685 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="58bc04bb-fcb0-42e3-8225-369bc0272d41" Jan 28 12:27:37 crc kubenswrapper[4685]: E0128 12:27:37.610768 4685 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:27:37 crc kubenswrapper[4685]: I0128 12:27:37.611596 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:27:39 crc kubenswrapper[4685]: I0128 12:27:39.746728 4685 scope.go:117] "RemoveContainer" containerID="311fa5cbc4c861e1b3532631f42db3865d1013d456014dfc8148395baf4a095d" Jan 28 12:27:39 crc kubenswrapper[4685]: I0128 12:27:39.998018 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 12:27:40 crc kubenswrapper[4685]: I0128 12:27:40.000695 4685 generic.go:334] "Generic (PLEG): container finished" podID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerID="876dd25e6aeaa9326b7f5a32cb96b37207281d17b0db25635e63d4e3a3e754c6" exitCode=0 Jan 28 12:27:40 crc kubenswrapper[4685]: I0128 12:27:40.000735 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" event={"ID":"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45","Type":"ContainerDied","Data":"876dd25e6aeaa9326b7f5a32cb96b37207281d17b0db25635e63d4e3a3e754c6"} Jan 28 12:27:40 crc kubenswrapper[4685]: I0128 12:27:40.001202 4685 scope.go:117] "RemoveContainer" containerID="876dd25e6aeaa9326b7f5a32cb96b37207281d17b0db25635e63d4e3a3e754c6" Jan 28 12:27:40 crc kubenswrapper[4685]: I0128 12:27:40.001642 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:40 crc kubenswrapper[4685]: I0128 12:27:40.002165 4685 status_manager.go:851] "Failed to get status for pod" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-qpc29\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:40 crc kubenswrapper[4685]: I0128 12:27:40.002449 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:40 crc kubenswrapper[4685]: I0128 12:27:40.002746 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:40 crc kubenswrapper[4685]: I0128 12:27:40.003049 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:40 crc kubenswrapper[4685]: I0128 12:27:40.555654 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:40 crc kubenswrapper[4685]: I0128 12:27:40.556691 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:40 crc kubenswrapper[4685]: I0128 12:27:40.557142 4685 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:40 crc kubenswrapper[4685]: I0128 12:27:40.557623 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:40 crc kubenswrapper[4685]: I0128 12:27:40.558027 4685 status_manager.go:851] "Failed to get status for pod" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-qpc29\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:40 crc kubenswrapper[4685]: I0128 12:27:40.558480 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:41 crc kubenswrapper[4685]: E0128 12:27:41.744212 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="7s" Jan 28 12:27:43 crc kubenswrapper[4685]: I0128 12:27:43.232005 4685 scope.go:117] "RemoveContainer" containerID="54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311" Jan 28 12:27:43 crc kubenswrapper[4685]: E0128 12:27:43.233272 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\": container with ID starting with 54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311 not found: ID does not exist" containerID="54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311" Jan 28 12:27:43 crc kubenswrapper[4685]: I0128 12:27:43.233458 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311"} err="failed to get container status \"54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\": rpc error: code = 
NotFound desc = could not find container \"54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311\": container with ID starting with 54d8d61de7fa7b124caf0fad1e7c9b8380142cce764d2d363d0c506076934311 not found: ID does not exist" Jan 28 12:27:43 crc kubenswrapper[4685]: I0128 12:27:43.233576 4685 scope.go:117] "RemoveContainer" containerID="8cf8b7d4f84db556d87442998df941ff050e59da3d49fb3fbd957ff9b61ed41e" Jan 28 12:27:44 crc kubenswrapper[4685]: I0128 12:27:44.028797 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 12:27:44 crc kubenswrapper[4685]: E0128 12:27:44.255413 4685 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/events\": dial tcp 38.102.83.175:6443: connect: connection refused" event=< Jan 28 12:27:44 crc kubenswrapper[4685]: &Event{ObjectMeta:{kube-controller-manager-crc.188ee4bb50876286 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:ProbeError,Message:Readiness probe error: Get "https://192.168.126.11:10257/healthz": dial tcp 192.168.126.11:10257: connect: connection refused Jan 28 12:27:44 crc kubenswrapper[4685]: body: Jan 28 12:27:44 crc kubenswrapper[4685]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 12:26:36.764250758 +0000 UTC m=+347.851664603,LastTimestamp:2026-01-28 12:26:36.764250758 +0000 UTC m=+347.851664603,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 28 12:27:44 crc kubenswrapper[4685]: > Jan 28 12:27:44 crc kubenswrapper[4685]: W0128 12:27:44.291069 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-92305a5605188678d5540f53f5ade844ea928cc5ba0a724a257a4b2db7290715 WatchSource:0}: Error finding container 92305a5605188678d5540f53f5ade844ea928cc5ba0a724a257a4b2db7290715: Status 404 returned error can't find the container with id 92305a5605188678d5540f53f5ade844ea928cc5ba0a724a257a4b2db7290715 Jan 28 12:27:44 crc kubenswrapper[4685]: I0128 12:27:44.505881 4685 scope.go:117] "RemoveContainer" containerID="6977e9ae72af7410a3fa85f1726ef141f63a747b227c7f4c03ca161aaa31a339" Jan 28 12:27:44 crc kubenswrapper[4685]: I0128 12:27:44.817760 4685 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" Jan 28 12:27:44 crc kubenswrapper[4685]: I0128 12:27:44.817849 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" Jan 28 12:27:45 crc kubenswrapper[4685]: I0128 12:27:45.041348 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 12:27:45 crc kubenswrapper[4685]: I0128 12:27:45.044620 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" 
event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"92305a5605188678d5540f53f5ade844ea928cc5ba0a724a257a4b2db7290715"} Jan 28 12:27:45 crc kubenswrapper[4685]: I0128 12:27:45.593969 4685 scope.go:117] "RemoveContainer" containerID="e149b674ba2d2840004dee27482823a46cac1721d4e390c239545a74990e0d6e" Jan 28 12:27:45 crc kubenswrapper[4685]: W0128 12:27:45.608878 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-8dedd7562feec3e64f07e16d78254d0da1554d428be22eef6fdade19ceccd079 WatchSource:0}: Error finding container 8dedd7562feec3e64f07e16d78254d0da1554d428be22eef6fdade19ceccd079: Status 404 returned error can't find the container with id 8dedd7562feec3e64f07e16d78254d0da1554d428be22eef6fdade19ceccd079 Jan 28 12:27:45 crc kubenswrapper[4685]: I0128 12:27:45.699943 4685 scope.go:117] "RemoveContainer" containerID="caee44696934a4d387e1733d51a0595d1178240dbf3fedbc809bf243d2dee991" Jan 28 12:27:45 crc kubenswrapper[4685]: I0128 12:27:45.761461 4685 scope.go:117] "RemoveContainer" containerID="d52f8dab07d0419b1defd686bdb67960af38684d275197c3b781a97e7f2c9e37" Jan 28 12:27:46 crc kubenswrapper[4685]: I0128 12:27:46.050452 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"8dedd7562feec3e64f07e16d78254d0da1554d428be22eef6fdade19ceccd079"} Jan 28 12:27:47 crc kubenswrapper[4685]: I0128 12:27:47.056532 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" event={"ID":"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45","Type":"ContainerStarted","Data":"71a1e5a0e8ccd005e834a33dfd3b46afab2cab7ea29b9efd310d6fef958b0982"} Jan 28 12:27:47 crc kubenswrapper[4685]: I0128 12:27:47.058036 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"befdb729d4ff0fcce72c6fef49d8f658a25b7132bece377e7429c3b1cc2271eb"} Jan 28 12:27:47 crc kubenswrapper[4685]: I0128 12:27:47.060252 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-network-node-identity_network-node-identity-vrzqb_ef543e1b-8068-4ea3-b32a-61027b32e95d/approver/0.log" Jan 28 12:27:47 crc kubenswrapper[4685]: I0128 12:27:47.060702 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"9b7c2ae9c4da35bec35fe050439a22b536741a29a46ff33082ff60cae8ea894b"} Jan 28 12:27:47 crc kubenswrapper[4685]: I0128 12:27:47.064336 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gvfhc" event={"ID":"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2","Type":"ContainerStarted","Data":"e55e9bc8280a70b64ffa630f40f46ef5323b4df51af88b6ac6ad7d0330193c82"} Jan 28 12:27:47 crc kubenswrapper[4685]: I0128 12:27:47.065774 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8cf72" event={"ID":"424bdf45-4dcb-4b13-b68f-e55e115238bb","Type":"ContainerStarted","Data":"3659d0243c876cdcac8cad157f8bd7e3a795e9778cce8e1d98f1518bbdffcc77"} Jan 28 12:27:47 crc kubenswrapper[4685]: I0128 12:27:47.067446 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-wl9lt" event={"ID":"e68fe67b-133d-4474-8f6d-a781bca954d7","Type":"ContainerStarted","Data":"e8a3c577e92aeb5212e6c1871671df58751247d3d7a016f2c8acb052e3d6b182"} Jan 28 12:27:47 crc kubenswrapper[4685]: I0128 12:27:47.069194 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-smscz" event={"ID":"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01","Type":"ContainerStarted","Data":"52ccd81667dc935e9130342d889c379b844e664866d2951188f9a67b2ee72ff3"} Jan 28 12:27:47 crc kubenswrapper[4685]: I0128 12:27:47.072287 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5zhxv" event={"ID":"6d558882-5a65-41ae-bcf0-d13c7cecc034","Type":"ContainerStarted","Data":"1b8e163aa4c59e5461b2978211e65b493bf880a2b306864c46a1b56b35f4a790"} Jan 28 12:27:47 crc kubenswrapper[4685]: I0128 12:27:47.073928 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9q8q2" event={"ID":"8d93a170-5ad3-489b-b3be-7e3cc4201970","Type":"ContainerStarted","Data":"c107caeb14757a9e2f196dad259efa8c46aa357e2d51892411fb356c2bfabb12"} Jan 28 12:27:47 crc kubenswrapper[4685]: I0128 12:27:47.075342 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntg69" event={"ID":"7d2d54a9-9f8c-4158-8551-c4351bca8c19","Type":"ContainerStarted","Data":"9e8ca1225350fc772412a78c7bd5e9caaef6072b38416c764592d75cf13e940d"} Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.083539 4685 generic.go:334] "Generic (PLEG): container finished" podID="6d558882-5a65-41ae-bcf0-d13c7cecc034" containerID="1b8e163aa4c59e5461b2978211e65b493bf880a2b306864c46a1b56b35f4a790" exitCode=0 Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.083616 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5zhxv" event={"ID":"6d558882-5a65-41ae-bcf0-d13c7cecc034","Type":"ContainerDied","Data":"1b8e163aa4c59e5461b2978211e65b493bf880a2b306864c46a1b56b35f4a790"} Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.085867 4685 generic.go:334] "Generic (PLEG): container finished" podID="8d93a170-5ad3-489b-b3be-7e3cc4201970" containerID="c107caeb14757a9e2f196dad259efa8c46aa357e2d51892411fb356c2bfabb12" exitCode=0 Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.085953 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9q8q2" event={"ID":"8d93a170-5ad3-489b-b3be-7e3cc4201970","Type":"ContainerDied","Data":"c107caeb14757a9e2f196dad259efa8c46aa357e2d51892411fb356c2bfabb12"} Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.086660 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.087121 4685 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.087636 4685 status_manager.go:851] "Failed 
to get status for pod" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-qpc29\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.088092 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.088510 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.088829 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.089147 4685 status_manager.go:851] "Failed to get status for pod" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" pod="openshift-marketplace/certified-operators-9q8q2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-9q8q2\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.090584 4685 generic.go:334] "Generic (PLEG): container finished" podID="e68fe67b-133d-4474-8f6d-a781bca954d7" containerID="e8a3c577e92aeb5212e6c1871671df58751247d3d7a016f2c8acb052e3d6b182" exitCode=0 Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.090697 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wl9lt" event={"ID":"e68fe67b-133d-4474-8f6d-a781bca954d7","Type":"ContainerDied","Data":"e8a3c577e92aeb5212e6c1871671df58751247d3d7a016f2c8acb052e3d6b182"} Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.091233 4685 status_manager.go:851] "Failed to get status for pod" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" pod="openshift-marketplace/certified-operators-9q8q2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-9q8q2\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.091584 4685 status_manager.go:851] "Failed to get status for pod" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" pod="openshift-marketplace/redhat-operators-wl9lt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-wl9lt\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.091798 4685 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.092051 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.092455 4685 status_manager.go:851] "Failed to get status for pod" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-qpc29\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.092766 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.093221 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.093633 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.096162 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.096330 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"295ec7ff5940f85a5c90ed2737816def1a8cd11e1218c705206ae7f20d5b84b8"} Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.100554 4685 generic.go:334] "Generic (PLEG): container finished" podID="e4bd8edd-83cd-4485-b7ad-b13d5aa53a01" containerID="52ccd81667dc935e9130342d889c379b844e664866d2951188f9a67b2ee72ff3" exitCode=0 Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.100607 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-smscz" event={"ID":"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01","Type":"ContainerDied","Data":"52ccd81667dc935e9130342d889c379b844e664866d2951188f9a67b2ee72ff3"} Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.102645 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"c0b1b466f22927e863a21329223f1951e5b7c493db7c302c1577d4bb3c098afa"} Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.104983 4685 generic.go:334] "Generic (PLEG): container finished" podID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" containerID="e55e9bc8280a70b64ffa630f40f46ef5323b4df51af88b6ac6ad7d0330193c82" exitCode=0 Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.105043 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gvfhc" event={"ID":"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2","Type":"ContainerDied","Data":"e55e9bc8280a70b64ffa630f40f46ef5323b4df51af88b6ac6ad7d0330193c82"} Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.106139 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.106579 4685 status_manager.go:851] "Failed to get status for pod" podUID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" pod="openshift-marketplace/community-operators-gvfhc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-gvfhc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.106967 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.107378 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.107657 4685 status_manager.go:851] "Failed to get status for pod" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" pod="openshift-marketplace/certified-operators-9q8q2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-9q8q2\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.107981 4685 status_manager.go:851] "Failed to get status for pod" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" pod="openshift-marketplace/redhat-operators-wl9lt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-wl9lt\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.108319 4685 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 
38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.108519 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.108714 4685 status_manager.go:851] "Failed to get status for pod" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-qpc29\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.109946 4685 generic.go:334] "Generic (PLEG): container finished" podID="424bdf45-4dcb-4b13-b68f-e55e115238bb" containerID="3659d0243c876cdcac8cad157f8bd7e3a795e9778cce8e1d98f1518bbdffcc77" exitCode=0 Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.110056 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8cf72" event={"ID":"424bdf45-4dcb-4b13-b68f-e55e115238bb","Type":"ContainerDied","Data":"3659d0243c876cdcac8cad157f8bd7e3a795e9778cce8e1d98f1518bbdffcc77"} Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.111381 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.111670 4685 status_manager.go:851] "Failed to get status for pod" podUID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" pod="openshift-marketplace/community-operators-gvfhc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-gvfhc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.112081 4685 status_manager.go:851] "Failed to get status for pod" podUID="424bdf45-4dcb-4b13-b68f-e55e115238bb" pod="openshift-marketplace/redhat-marketplace-8cf72" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-8cf72\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.115478 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.115866 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.116369 4685 
status_manager.go:851] "Failed to get status for pod" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" pod="openshift-marketplace/certified-operators-9q8q2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-9q8q2\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.118630 4685 status_manager.go:851] "Failed to get status for pod" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" pod="openshift-marketplace/redhat-operators-wl9lt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-wl9lt\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.119039 4685 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.119466 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.119822 4685 status_manager.go:851] "Failed to get status for pod" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-qpc29\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.119870 4685 generic.go:334] "Generic (PLEG): container finished" podID="7d2d54a9-9f8c-4158-8551-c4351bca8c19" containerID="9e8ca1225350fc772412a78c7bd5e9caaef6072b38416c764592d75cf13e940d" exitCode=0 Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.119951 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntg69" event={"ID":"7d2d54a9-9f8c-4158-8551-c4351bca8c19","Type":"ContainerDied","Data":"9e8ca1225350fc772412a78c7bd5e9caaef6072b38416c764592d75cf13e940d"} Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.123160 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-924cb" event={"ID":"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb","Type":"ContainerStarted","Data":"c46783e3879d575621e86df988fda5b3816abe198b66239ad4948047f1cb1b2c"} Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.123628 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.124037 4685 status_manager.go:851] "Failed to get status for pod" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" pod="openshift-marketplace/redhat-operators-wl9lt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-wl9lt\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.124447 4685 status_manager.go:851] "Failed to get status 
for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.124516 4685 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-qpc29 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body= Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.124557 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.124688 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.125050 4685 status_manager.go:851] "Failed to get status for pod" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-qpc29\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.125576 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.125963 4685 status_manager.go:851] "Failed to get status for pod" podUID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" pod="openshift-marketplace/community-operators-gvfhc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-gvfhc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.126377 4685 status_manager.go:851] "Failed to get status for pod" podUID="424bdf45-4dcb-4b13-b68f-e55e115238bb" pod="openshift-marketplace/redhat-marketplace-8cf72" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-8cf72\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.126697 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.127016 4685 status_manager.go:851] 
"Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.127465 4685 status_manager.go:851] "Failed to get status for pod" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" pod="openshift-marketplace/certified-operators-9q8q2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-9q8q2\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.127898 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.128478 4685 status_manager.go:851] "Failed to get status for pod" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" pod="openshift-marketplace/certified-operators-9q8q2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-9q8q2\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.128876 4685 status_manager.go:851] "Failed to get status for pod" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" pod="openshift-marketplace/redhat-operators-wl9lt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-wl9lt\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.129271 4685 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.129638 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.129973 4685 status_manager.go:851] "Failed to get status for pod" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-qpc29\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.130368 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 
38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.130729 4685 status_manager.go:851] "Failed to get status for pod" podUID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" pod="openshift-marketplace/community-operators-gvfhc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-gvfhc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.131107 4685 status_manager.go:851] "Failed to get status for pod" podUID="424bdf45-4dcb-4b13-b68f-e55e115238bb" pod="openshift-marketplace/redhat-marketplace-8cf72" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-8cf72\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: I0128 12:27:48.131538 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: E0128 12:27:48.676936 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:27:48Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:27:48Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:27:48Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T12:27:48Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[],\\\"sizeBytes\\\":1675404549},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197
969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"si
zeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeByte
s\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: E0128 12:27:48.677392 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: E0128 12:27:48.677673 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: E0128 12:27:48.678031 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: E0128 12:27:48.678521 4685 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:48 crc kubenswrapper[4685]: E0128 12:27:48.678579 4685 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 12:27:48 crc kubenswrapper[4685]: E0128 12:27:48.745926 4685 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="7s" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.131919 4685 generic.go:334] "Generic (PLEG): container finished" podID="cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" containerID="c46783e3879d575621e86df988fda5b3816abe198b66239ad4948047f1cb1b2c" exitCode=0 Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.131999 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-924cb" event={"ID":"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb","Type":"ContainerDied","Data":"c46783e3879d575621e86df988fda5b3816abe198b66239ad4948047f1cb1b2c"} Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.132871 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.133293 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.134117 4685 status_manager.go:851] "Failed to get status for pod" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" pod="openshift-marketplace/certified-operators-9q8q2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-9q8q2\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.134560 4685 status_manager.go:851] "Failed to get status for pod" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" pod="openshift-marketplace/redhat-operators-wl9lt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-wl9lt\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.134579 4685 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="befdb729d4ff0fcce72c6fef49d8f658a25b7132bece377e7429c3b1cc2271eb" exitCode=0 Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.134670 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"befdb729d4ff0fcce72c6fef49d8f658a25b7132bece377e7429c3b1cc2271eb"} Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.134799 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.134838 4685 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="58bc04bb-fcb0-42e3-8225-369bc0272d41" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.134858 4685 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="58bc04bb-fcb0-42e3-8225-369bc0272d41" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.135058 4685 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: E0128 12:27:49.135093 4685 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 
Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.135346 4685 status_manager.go:851] "Failed to get status for pod" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-qpc29\": dial tcp 38.102.83.175:6443: connect: connection refused"
Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.135535 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused"
Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.135735 4685 status_manager.go:851] "Failed to get status for pod" podUID="cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" pod="openshift-marketplace/community-operators-924cb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-924cb\": dial tcp 38.102.83.175:6443: connect: connection refused"
Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.135977 4685 status_manager.go:851] "Failed to get status for pod" podUID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" pod="openshift-marketplace/community-operators-gvfhc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-gvfhc\": dial tcp 38.102.83.175:6443: connect: connection refused"
Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.136304 4685 status_manager.go:851] "Failed to get status for pod" podUID="424bdf45-4dcb-4b13-b68f-e55e115238bb" pod="openshift-marketplace/redhat-marketplace-8cf72" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-8cf72\": dial tcp 38.102.83.175:6443: connect: connection refused"
Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.136573 4685 status_manager.go:851] "Failed to get status for pod" podUID="cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" pod="openshift-marketplace/community-operators-924cb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-924cb\": dial tcp 38.102.83.175:6443: connect: connection refused"
Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.136599 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-qpc29_68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45/marketplace-operator/1.log"
Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.136810 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused"
Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.137014 4685 generic.go:334] "Generic (PLEG): container finished" podID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerID="71a1e5a0e8ccd005e834a33dfd3b46afab2cab7ea29b9efd310d6fef958b0982" exitCode=1
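
The wall of "Failed to get status for pod ... connection refused" entries is the status manager draining its queue: for each pod with a pending status change it first reads the pod back from the API and only then patches the status subresource, so while the apiserver is down every queued update logs one GET failure per sync and stays pending. A schematic client-go version of that two-step shape (syncPodStatus is a hypothetical helper, not kubelet's actual signature):

package statussketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// syncPodStatus mirrors the shape of the status manager's sync step: read the
// pod back from the API (the GET failing above), then patch the status
// subresource.
func syncPodStatus(c kubernetes.Interface, ns, name string, patch []byte) error {
	if _, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
		// Surfaces as "Failed to get status for pod"; the update stays
		// queued and is retried on a later sync, hence the repeated bursts.
		return fmt.Errorf("failed to get status for pod: %w", err)
	}
	_, err := c.CoreV1().Pods(ns).Patch(context.TODO(), name,
		types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "status")
	return err
}
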
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-gvfhc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.137117 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" event={"ID":"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45","Type":"ContainerDied","Data":"71a1e5a0e8ccd005e834a33dfd3b46afab2cab7ea29b9efd310d6fef958b0982"} Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.137263 4685 status_manager.go:851] "Failed to get status for pod" podUID="424bdf45-4dcb-4b13-b68f-e55e115238bb" pod="openshift-marketplace/redhat-marketplace-8cf72" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-8cf72\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.137297 4685 scope.go:117] "RemoveContainer" containerID="71a1e5a0e8ccd005e834a33dfd3b46afab2cab7ea29b9efd310d6fef958b0982" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.137445 4685 scope.go:117] "RemoveContainer" containerID="876dd25e6aeaa9326b7f5a32cb96b37207281d17b0db25635e63d4e3a3e754c6" Jan 28 12:27:49 crc kubenswrapper[4685]: E0128 12:27:49.137456 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-qpc29_openshift-marketplace(68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45)\"" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.137491 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.137731 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.138133 4685 status_manager.go:851] "Failed to get status for pod" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" pod="openshift-marketplace/certified-operators-9q8q2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-9q8q2\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.138304 4685 status_manager.go:851] "Failed to get status for pod" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" pod="openshift-marketplace/redhat-operators-wl9lt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-wl9lt\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.138527 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.138753 4685 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.139104 4685 status_manager.go:851] "Failed to get status for pod" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-qpc29\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.151618 4685 status_manager.go:851] "Failed to get status for pod" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" pod="openshift-marketplace/certified-operators-9q8q2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-9q8q2\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.151876 4685 status_manager.go:851] "Failed to get status for pod" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" pod="openshift-marketplace/redhat-operators-wl9lt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-wl9lt\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.152019 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.152208 4685 status_manager.go:851] "Failed to get status for pod" podUID="6d558882-5a65-41ae-bcf0-d13c7cecc034" pod="openshift-marketplace/redhat-operators-5zhxv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5zhxv\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.152353 4685 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.152493 4685 status_manager.go:851] "Failed to get status for pod" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-qpc29\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.152633 4685 
status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.152765 4685 status_manager.go:851] "Failed to get status for pod" podUID="cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" pod="openshift-marketplace/community-operators-924cb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-924cb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.152901 4685 status_manager.go:851] "Failed to get status for pod" podUID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" pod="openshift-marketplace/community-operators-gvfhc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-gvfhc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.153033 4685 status_manager.go:851] "Failed to get status for pod" podUID="424bdf45-4dcb-4b13-b68f-e55e115238bb" pod="openshift-marketplace/redhat-marketplace-8cf72" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-8cf72\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.153191 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.153340 4685 status_manager.go:851] "Failed to get status for pod" podUID="e4bd8edd-83cd-4485-b7ad-b13d5aa53a01" pod="openshift-marketplace/redhat-marketplace-smscz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-smscz\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.153470 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:49 crc kubenswrapper[4685]: I0128 12:27:49.153639 4685 status_manager.go:851] "Failed to get status for pod" podUID="7d2d54a9-9f8c-4158-8551-c4351bca8c19" pod="openshift-marketplace/certified-operators-ntg69" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-ntg69\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:50 crc kubenswrapper[4685]: I0128 12:27:50.166217 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-qpc29_68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45/marketplace-operator/1.log" Jan 28 12:27:50 crc kubenswrapper[4685]: I0128 12:27:50.167392 4685 scope.go:117] "RemoveContainer" 
containerID="71a1e5a0e8ccd005e834a33dfd3b46afab2cab7ea29b9efd310d6fef958b0982" Jan 28 12:27:50 crc kubenswrapper[4685]: I0128 12:27:50.167723 4685 status_manager.go:851] "Failed to get status for pod" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" pod="openshift-marketplace/redhat-operators-wl9lt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-wl9lt\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:50 crc kubenswrapper[4685]: I0128 12:27:50.168031 4685 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:50 crc kubenswrapper[4685]: E0128 12:27:50.168055 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-qpc29_openshift-marketplace(68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45)\"" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" Jan 28 12:27:50 crc kubenswrapper[4685]: I0128 12:27:50.168233 4685 status_manager.go:851] "Failed to get status for pod" podUID="6d558882-5a65-41ae-bcf0-d13c7cecc034" pod="openshift-marketplace/redhat-operators-5zhxv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5zhxv\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:50 crc kubenswrapper[4685]: I0128 12:27:50.168538 4685 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:50 crc kubenswrapper[4685]: I0128 12:27:50.168858 4685 status_manager.go:851] "Failed to get status for pod" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-qpc29\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:50 crc kubenswrapper[4685]: I0128 12:27:50.169112 4685 status_manager.go:851] "Failed to get status for pod" podUID="cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" pod="openshift-marketplace/community-operators-924cb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-924cb\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:50 crc kubenswrapper[4685]: I0128 12:27:50.169409 4685 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:50 crc kubenswrapper[4685]: I0128 12:27:50.169603 4685 status_manager.go:851] "Failed to get status for pod" 
podUID="424bdf45-4dcb-4b13-b68f-e55e115238bb" pod="openshift-marketplace/redhat-marketplace-8cf72" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-8cf72\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:50 crc kubenswrapper[4685]: I0128 12:27:50.169763 4685 status_manager.go:851] "Failed to get status for pod" podUID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" pod="openshift-marketplace/community-operators-gvfhc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-gvfhc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:50 crc kubenswrapper[4685]: I0128 12:27:50.169921 4685 status_manager.go:851] "Failed to get status for pod" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:50 crc kubenswrapper[4685]: I0128 12:27:50.170081 4685 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:50 crc kubenswrapper[4685]: I0128 12:27:50.170297 4685 status_manager.go:851] "Failed to get status for pod" podUID="e4bd8edd-83cd-4485-b7ad-b13d5aa53a01" pod="openshift-marketplace/redhat-marketplace-smscz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-smscz\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:50 crc kubenswrapper[4685]: I0128 12:27:50.170482 4685 status_manager.go:851] "Failed to get status for pod" podUID="7d2d54a9-9f8c-4158-8551-c4351bca8c19" pod="openshift-marketplace/certified-operators-ntg69" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-ntg69\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:50 crc kubenswrapper[4685]: I0128 12:27:50.170666 4685 status_manager.go:851] "Failed to get status for pod" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" pod="openshift-marketplace/certified-operators-9q8q2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-9q8q2\": dial tcp 38.102.83.175:6443: connect: connection refused" Jan 28 12:27:51 crc kubenswrapper[4685]: I0128 12:27:51.173427 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9q8q2" event={"ID":"8d93a170-5ad3-489b-b3be-7e3cc4201970","Type":"ContainerStarted","Data":"a98be9e5af6900c610be2904f219404b02c4c12e10e4ed3af243b289ce6da76b"} Jan 28 12:27:51 crc kubenswrapper[4685]: I0128 12:27:51.175304 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e0ab69775a41dbbc6c32cff2774b30d98489d61fcf231ca67ea59594487bf26a"} Jan 28 12:27:51 crc kubenswrapper[4685]: I0128 12:27:51.175328 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"237cdb43a41d27f12394d2a1972d189330fb805fc4b9707868bfd10511552404"} Jan 28 12:27:51 crc kubenswrapper[4685]: I0128 12:27:51.176916 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8cf72" event={"ID":"424bdf45-4dcb-4b13-b68f-e55e115238bb","Type":"ContainerStarted","Data":"e752342182b5e9b20834b5baf524907384a8365cc36be08ac75ac918cef53c0d"} Jan 28 12:27:52 crc kubenswrapper[4685]: I0128 12:27:52.186321 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"3fe9969348b56433e1017b6bd0e9313060fbb37a4b0c733eefab48ddc8fe7be1"} Jan 28 12:27:52 crc kubenswrapper[4685]: I0128 12:27:52.186709 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"6a1104df2b41e4be8b792ab800f93cc70e43c301af34700812eedfcbf79aa751"} Jan 28 12:27:52 crc kubenswrapper[4685]: I0128 12:27:52.188959 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gvfhc" event={"ID":"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2","Type":"ContainerStarted","Data":"d7c9b9ef7c1464ea9eec8cfe724b3e4c3fe8493c92b6942256e10d5fe0ac9088"} Jan 28 12:27:52 crc kubenswrapper[4685]: I0128 12:27:52.191305 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5zhxv" event={"ID":"6d558882-5a65-41ae-bcf0-d13c7cecc034","Type":"ContainerStarted","Data":"809e6fae14ecfb2c50c23fe8025cf3d553761ff959dceffddf9bd8fef370f303"} Jan 28 12:27:52 crc kubenswrapper[4685]: I0128 12:27:52.192768 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wl9lt" event={"ID":"e68fe67b-133d-4474-8f6d-a781bca954d7","Type":"ContainerStarted","Data":"6518f209e38815a3951d44c5d8f748037065a21fc8667d52d2610a6024a15815"} Jan 28 12:27:52 crc kubenswrapper[4685]: I0128 12:27:52.194886 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-924cb" event={"ID":"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb","Type":"ContainerStarted","Data":"03de7b34e3d85908ae1ef4b1a73ddc86f7d8fcfffbd239d2f862a007ed29de79"} Jan 28 12:27:52 crc kubenswrapper[4685]: I0128 12:27:52.197622 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-smscz" event={"ID":"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01","Type":"ContainerStarted","Data":"09825a1e10594adcce28925c5d6d2537a1fd308ff1f40338842a902999563e97"} Jan 28 12:27:53 crc kubenswrapper[4685]: I0128 12:27:53.128937 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8cf72" Jan 28 12:27:53 crc kubenswrapper[4685]: I0128 12:27:53.129022 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8cf72" Jan 28 12:27:53 crc kubenswrapper[4685]: I0128 12:27:53.533495 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-smscz" Jan 28 12:27:53 crc kubenswrapper[4685]: I0128 12:27:53.533866 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-smscz" Jan 28 12:27:54 crc kubenswrapper[4685]: I0128 12:27:54.509712 4685 prober.go:107] 
"Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-8cf72" podUID="424bdf45-4dcb-4b13-b68f-e55e115238bb" containerName="registry-server" probeResult="failure" output=< Jan 28 12:27:54 crc kubenswrapper[4685]: timeout: failed to connect service ":50051" within 1s Jan 28 12:27:54 crc kubenswrapper[4685]: > Jan 28 12:27:54 crc kubenswrapper[4685]: I0128 12:27:54.539687 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wl9lt" Jan 28 12:27:54 crc kubenswrapper[4685]: I0128 12:27:54.539756 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wl9lt" Jan 28 12:27:54 crc kubenswrapper[4685]: I0128 12:27:54.587505 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-smscz" podUID="e4bd8edd-83cd-4485-b7ad-b13d5aa53a01" containerName="registry-server" probeResult="failure" output=< Jan 28 12:27:54 crc kubenswrapper[4685]: timeout: failed to connect service ":50051" within 1s Jan 28 12:27:54 crc kubenswrapper[4685]: > Jan 28 12:27:54 crc kubenswrapper[4685]: I0128 12:27:54.817051 4685 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" Jan 28 12:27:54 crc kubenswrapper[4685]: I0128 12:27:54.817925 4685 scope.go:117] "RemoveContainer" containerID="71a1e5a0e8ccd005e834a33dfd3b46afab2cab7ea29b9efd310d6fef958b0982" Jan 28 12:27:54 crc kubenswrapper[4685]: E0128 12:27:54.818240 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-qpc29_openshift-marketplace(68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45)\"" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" Jan 28 12:27:54 crc kubenswrapper[4685]: I0128 12:27:54.933742 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5zhxv" Jan 28 12:27:54 crc kubenswrapper[4685]: I0128 12:27:54.933822 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5zhxv" Jan 28 12:27:55 crc kubenswrapper[4685]: I0128 12:27:55.216338 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"c4c262ec97917cbaa49589c64f1e981e271bb301c6c40ba195a5b13b05c8b0cc"} Jan 28 12:27:55 crc kubenswrapper[4685]: I0128 12:27:55.218581 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntg69" event={"ID":"7d2d54a9-9f8c-4158-8551-c4351bca8c19","Type":"ContainerStarted","Data":"cd28bb854af2ba6d2f06e208aa621cdfc1fcea4e14765739aa48f3842cc91b69"} Jan 28 12:27:55 crc kubenswrapper[4685]: I0128 12:27:55.574804 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wl9lt" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" containerName="registry-server" probeResult="failure" output=< Jan 28 12:27:55 crc kubenswrapper[4685]: timeout: failed to connect service ":50051" within 1s Jan 28 12:27:55 crc kubenswrapper[4685]: > Jan 28 12:27:55 crc kubenswrapper[4685]: I0128 12:27:55.971978 4685 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-marketplace/redhat-operators-5zhxv" podUID="6d558882-5a65-41ae-bcf0-d13c7cecc034" containerName="registry-server" probeResult="failure" output=< Jan 28 12:27:55 crc kubenswrapper[4685]: timeout: failed to connect service ":50051" within 1s Jan 28 12:27:55 crc kubenswrapper[4685]: > Jan 28 12:27:56 crc kubenswrapper[4685]: I0128 12:27:56.762847 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:27:57 crc kubenswrapper[4685]: I0128 12:27:57.069069 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:27:57 crc kubenswrapper[4685]: I0128 12:27:57.069130 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:27:57 crc kubenswrapper[4685]: I0128 12:27:57.229098 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:27:57 crc kubenswrapper[4685]: I0128 12:27:57.229977 4685 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="58bc04bb-fcb0-42e3-8225-369bc0272d41" Jan 28 12:27:57 crc kubenswrapper[4685]: I0128 12:27:57.230159 4685 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="58bc04bb-fcb0-42e3-8225-369bc0272d41" Jan 28 12:27:57 crc kubenswrapper[4685]: I0128 12:27:57.237964 4685 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:27:57 crc kubenswrapper[4685]: I0128 12:27:57.238677 4685 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58bc04bb-fcb0-42e3-8225-369bc0272d41\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://237cdb43a41d27f12394d2a1972d189330fb805fc4b9707868bfd10511552404\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:27:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1104df2b41e4be8b792ab800f93cc70e43c301af34700812eedfcbf79aa751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:27:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0ab69775a41dbbc6c32cff2774b30d98489d61fcf231ca67ea59594487bf26a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:27:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c262ec97917cbaa49589c64f1e981e271bb301c6c40ba195a5b13b05c8b0cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:27:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3fe9969348b56433e1017b6bd0e9313060fbb37a4b0c733eefab48ddc8fe7be1\\\",\\\"image\\
\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T12:27:52Z\\\"}}}],\\\"phase\\\":\\\"Running\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": pods \"kube-apiserver-crc\" not found" Jan 28 12:27:57 crc kubenswrapper[4685]: I0128 12:27:57.612846 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:27:57 crc kubenswrapper[4685]: I0128 12:27:57.613092 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:27:57 crc kubenswrapper[4685]: I0128 12:27:57.618559 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:27:57 crc kubenswrapper[4685]: I0128 12:27:57.621021 4685 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="6decb284-ec76-47d0-85eb-807d2d91a68e" Jan 28 12:27:57 crc kubenswrapper[4685]: I0128 12:27:57.901724 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:27:57 crc kubenswrapper[4685]: I0128 12:27:57.906590 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:27:58 crc kubenswrapper[4685]: I0128 12:27:58.233677 4685 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="58bc04bb-fcb0-42e3-8225-369bc0272d41" Jan 28 12:27:58 crc kubenswrapper[4685]: I0128 12:27:58.233715 4685 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="58bc04bb-fcb0-42e3-8225-369bc0272d41" Jan 28 12:27:58 crc kubenswrapper[4685]: I0128 12:27:58.239231 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 12:27:58 crc kubenswrapper[4685]: I0128 12:27:58.239283 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 12:27:59 crc kubenswrapper[4685]: I0128 12:27:59.238203 4685 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="58bc04bb-fcb0-42e3-8225-369bc0272d41" Jan 28 12:27:59 crc kubenswrapper[4685]: I0128 12:27:59.238520 4685 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="58bc04bb-fcb0-42e3-8225-369bc0272d41" Jan 28 12:28:00 crc kubenswrapper[4685]: I0128 12:28:00.242566 4685 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="58bc04bb-fcb0-42e3-8225-369bc0272d41" Jan 28 12:28:00 crc kubenswrapper[4685]: I0128 12:28:00.242607 4685 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="58bc04bb-fcb0-42e3-8225-369bc0272d41" Jan 
Jan 28 12:28:00 crc kubenswrapper[4685]: I0128 12:28:00.599751 4685 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="6decb284-ec76-47d0-85eb-807d2d91a68e"
Jan 28 12:28:01 crc kubenswrapper[4685]: I0128 12:28:01.143581 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9q8q2"
Jan 28 12:28:01 crc kubenswrapper[4685]: I0128 12:28:01.143648 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9q8q2"
Jan 28 12:28:01 crc kubenswrapper[4685]: I0128 12:28:01.209203 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9q8q2"
Jan 28 12:28:01 crc kubenswrapper[4685]: I0128 12:28:01.304749 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9q8q2"
Jan 28 12:28:01 crc kubenswrapper[4685]: I0128 12:28:01.368735 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-924cb"
Jan 28 12:28:01 crc kubenswrapper[4685]: I0128 12:28:01.369225 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-924cb"
Jan 28 12:28:01 crc kubenswrapper[4685]: I0128 12:28:01.405228 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-924cb"
Jan 28 12:28:01 crc kubenswrapper[4685]: I0128 12:28:01.735713 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-gvfhc"
Jan 28 12:28:01 crc kubenswrapper[4685]: I0128 12:28:01.735807 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-gvfhc"
Jan 28 12:28:01 crc kubenswrapper[4685]: I0128 12:28:01.799861 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-gvfhc"
Jan 28 12:28:01 crc kubenswrapper[4685]: I0128 12:28:01.843977 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ntg69"
Jan 28 12:28:01 crc kubenswrapper[4685]: I0128 12:28:01.844280 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ntg69"
Jan 28 12:28:01 crc kubenswrapper[4685]: I0128 12:28:01.891466 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ntg69"
Jan 28 12:28:02 crc kubenswrapper[4685]: I0128 12:28:02.312752 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ntg69"
Jan 28 12:28:02 crc kubenswrapper[4685]: I0128 12:28:02.312835 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-924cb"
Jan 28 12:28:02 crc kubenswrapper[4685]: I0128 12:28:02.348716 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-gvfhc"
Jan 28 12:28:03 crc kubenswrapper[4685]: I0128 12:28:03.182835 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8cf72"
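
The repeated startup "unhealthy" → "started" → readiness "ready" sequences above reflect probe ordering: readiness results are not acted on until the startup probe has passed, and for these marketplace pods the startup check is the registry-server health check on gRPC port 50051 whose earlier failures printed timeout: failed to connect service ":50051" within 1s. That check is roughly equivalent to dialing the standard gRPC health service with a 1s budget, as in this sketch (assumes google.golang.org/grpc; the probe binary actually baked into these images may differ):

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// Rough equivalent of the registry-server startup check: connect to the
// catalog's gRPC port and call the standard health service within 1s.
func probe(addr string) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	conn, err := grpc.DialContext(ctx, addr,
		grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock())
	if err != nil {
		return fmt.Errorf("timeout: failed to connect service %q within 1s", addr)
	}
	defer conn.Close()
	_, err = healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
	return err
}

func main() {
	if err := probe(":50051"); err != nil {
		fmt.Println(err)
	}
}
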
Jan 28 12:28:03 crc kubenswrapper[4685]: I0128 12:28:03.226893 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8cf72"
Jan 28 12:28:03 crc kubenswrapper[4685]: I0128 12:28:03.593231 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-smscz"
Jan 28 12:28:03 crc kubenswrapper[4685]: I0128 12:28:03.629870 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-smscz"
Jan 28 12:28:04 crc kubenswrapper[4685]: I0128 12:28:04.586409 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wl9lt"
Jan 28 12:28:04 crc kubenswrapper[4685]: I0128 12:28:04.640277 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wl9lt"
Jan 28 12:28:04 crc kubenswrapper[4685]: I0128 12:28:04.971275 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5zhxv"
Jan 28 12:28:05 crc kubenswrapper[4685]: I0128 12:28:05.006566 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5zhxv"
Jan 28 12:28:07 crc kubenswrapper[4685]: I0128 12:28:07.545199 4685 scope.go:117] "RemoveContainer" containerID="71a1e5a0e8ccd005e834a33dfd3b46afab2cab7ea29b9efd310d6fef958b0982"
Jan 28 12:28:09 crc kubenswrapper[4685]: I0128 12:28:09.293261 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-qpc29_68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45/marketplace-operator/1.log"
Jan 28 12:28:09 crc kubenswrapper[4685]: I0128 12:28:09.293503 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" event={"ID":"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45","Type":"ContainerStarted","Data":"75b2ff7b44c2c7b65da4b0e671f102bf16b58fd9c78efae0607f0d87c8b60c42"}
Jan 28 12:28:10 crc kubenswrapper[4685]: I0128 12:28:10.301544 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-qpc29_68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45/marketplace-operator/2.log"
Jan 28 12:28:10 crc kubenswrapper[4685]: I0128 12:28:10.302322 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-qpc29_68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45/marketplace-operator/1.log"
Jan 28 12:28:10 crc kubenswrapper[4685]: I0128 12:28:10.302381 4685 generic.go:334] "Generic (PLEG): container finished" podID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerID="75b2ff7b44c2c7b65da4b0e671f102bf16b58fd9c78efae0607f0d87c8b60c42" exitCode=1
Jan 28 12:28:10 crc kubenswrapper[4685]: I0128 12:28:10.302414 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" event={"ID":"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45","Type":"ContainerDied","Data":"75b2ff7b44c2c7b65da4b0e671f102bf16b58fd9c78efae0607f0d87c8b60c42"}
Jan 28 12:28:10 crc kubenswrapper[4685]: I0128 12:28:10.302453 4685 scope.go:117] "RemoveContainer" containerID="71a1e5a0e8ccd005e834a33dfd3b46afab2cab7ea29b9efd310d6fef958b0982"
Jan 28 12:28:10 crc kubenswrapper[4685]: I0128 12:28:10.303192 4685 scope.go:117] "RemoveContainer" containerID="75b2ff7b44c2c7b65da4b0e671f102bf16b58fd9c78efae0607f0d87c8b60c42"
Jan 28 12:28:10 crc 
kubenswrapper[4685]: E0128 12:28:10.303477 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-qpc29_openshift-marketplace(68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45)\"" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" Jan 28 12:28:11 crc kubenswrapper[4685]: I0128 12:28:11.308562 4685 scope.go:117] "RemoveContainer" containerID="75b2ff7b44c2c7b65da4b0e671f102bf16b58fd9c78efae0607f0d87c8b60c42" Jan 28 12:28:11 crc kubenswrapper[4685]: E0128 12:28:11.308796 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-qpc29_openshift-marketplace(68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45)\"" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" Jan 28 12:28:12 crc kubenswrapper[4685]: I0128 12:28:12.323298 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-qpc29_68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45/marketplace-operator/2.log" Jan 28 12:28:14 crc kubenswrapper[4685]: I0128 12:28:14.817349 4685 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" Jan 28 12:28:14 crc kubenswrapper[4685]: I0128 12:28:14.817421 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" Jan 28 12:28:14 crc kubenswrapper[4685]: I0128 12:28:14.818081 4685 scope.go:117] "RemoveContainer" containerID="75b2ff7b44c2c7b65da4b0e671f102bf16b58fd9c78efae0607f0d87c8b60c42" Jan 28 12:28:14 crc kubenswrapper[4685]: E0128 12:28:14.818338 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-qpc29_openshift-marketplace(68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45)\"" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" Jan 28 12:28:23 crc kubenswrapper[4685]: I0128 12:28:23.389621 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-jbtd9_0aa6b49a-8078-44f4-b1a9-2542d5bad461/control-plane-machine-set-operator/0.log" Jan 28 12:28:23 crc kubenswrapper[4685]: I0128 12:28:23.391604 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jbtd9" event={"ID":"0aa6b49a-8078-44f4-b1a9-2542d5bad461","Type":"ContainerDied","Data":"a19cc0691543a9561ca9cb11f863d04005f966847556e2328724e6f08273f7f4"} Jan 28 12:28:23 crc kubenswrapper[4685]: I0128 12:28:23.391523 4685 generic.go:334] "Generic (PLEG): container finished" podID="0aa6b49a-8078-44f4-b1a9-2542d5bad461" containerID="a19cc0691543a9561ca9cb11f863d04005f966847556e2328724e6f08273f7f4" exitCode=1 Jan 28 12:28:23 crc kubenswrapper[4685]: I0128 12:28:23.394150 4685 scope.go:117] "RemoveContainer" containerID="a19cc0691543a9561ca9cb11f863d04005f966847556e2328724e6f08273f7f4" Jan 28 12:28:25 crc kubenswrapper[4685]: I0128 
12:28:25.361877 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Jan 28 12:28:25 crc kubenswrapper[4685]: I0128 12:28:25.485349 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Jan 28 12:28:25 crc kubenswrapper[4685]: I0128 12:28:25.604585 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Jan 28 12:28:26 crc kubenswrapper[4685]: I0128 12:28:26.415704 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-jbtd9_0aa6b49a-8078-44f4-b1a9-2542d5bad461/control-plane-machine-set-operator/0.log"
Jan 28 12:28:26 crc kubenswrapper[4685]: I0128 12:28:26.415776 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jbtd9" event={"ID":"0aa6b49a-8078-44f4-b1a9-2542d5bad461","Type":"ContainerStarted","Data":"5f6f5ce608a8579961e3cee2b2a7aac798708f1b8cc73332fdde6627c9c54f89"}
Jan 28 12:28:26 crc kubenswrapper[4685]: I0128 12:28:26.546009 4685 scope.go:117] "RemoveContainer" containerID="75b2ff7b44c2c7b65da4b0e671f102bf16b58fd9c78efae0607f0d87c8b60c42"
Jan 28 12:28:26 crc kubenswrapper[4685]: E0128 12:28:26.546359 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-qpc29_openshift-marketplace(68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45)\"" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45"
Jan 28 12:28:26 crc kubenswrapper[4685]: I0128 12:28:26.969069 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Jan 28 12:28:27 crc kubenswrapper[4685]: I0128 12:28:27.070441 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 12:28:27 crc kubenswrapper[4685]: I0128 12:28:27.070529 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 12:28:27 crc kubenswrapper[4685]: I0128 12:28:27.424843 4685 generic.go:334] "Generic (PLEG): container finished" podID="e52fd026-03db-4c60-bb63-d2f20b43855a" containerID="f188e910ef1a7c3fb8f5a964ffc658021c16719873f1f6b0912c81b1cb8dc234" exitCode=0
Jan 28 12:28:27 crc kubenswrapper[4685]: I0128 12:28:27.424906 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" event={"ID":"e52fd026-03db-4c60-bb63-d2f20b43855a","Type":"ContainerDied","Data":"f188e910ef1a7c3fb8f5a964ffc658021c16719873f1f6b0912c81b1cb8dc234"}
Jan 28 12:28:27 crc kubenswrapper[4685]: I0128 12:28:27.425569 4685 scope.go:117] "RemoveContainer" containerID="f188e910ef1a7c3fb8f5a964ffc658021c16719873f1f6b0912c81b1cb8dc234"
Jan 28 12:28:28 crc kubenswrapper[4685]: I0128 12:28:28.085530 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Jan 28 12:28:28 crc kubenswrapper[4685]: I0128 12:28:28.609097 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Jan 28 12:28:29 crc kubenswrapper[4685]: I0128 12:28:29.332139 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Jan 28 12:28:29 crc kubenswrapper[4685]: I0128 12:28:29.438529 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" event={"ID":"e52fd026-03db-4c60-bb63-d2f20b43855a","Type":"ContainerStarted","Data":"2359eeda70c3434f671bacf8c563acd65dca0071b15c4d41aec5b9fa17ac67be"}
Jan 28 12:28:29 crc kubenswrapper[4685]: I0128 12:28:29.535377 4685 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85"
Jan 28 12:28:29 crc kubenswrapper[4685]: I0128 12:28:29.535418 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85"
Jan 28 12:28:29 crc kubenswrapper[4685]: I0128 12:28:29.832008 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Jan 28 12:28:30 crc kubenswrapper[4685]: I0128 12:28:30.195858 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Jan 28 12:28:30 crc kubenswrapper[4685]: I0128 12:28:30.355877 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Jan 28 12:28:30 crc kubenswrapper[4685]: I0128 12:28:30.444270 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85"
Jan 28 12:28:30 crc kubenswrapper[4685]: I0128 12:28:30.448403 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85"
Jan 28 12:28:30 crc kubenswrapper[4685]: I0128 12:28:30.648842 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Jan 28 12:28:30 crc kubenswrapper[4685]: I0128 12:28:30.743554 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Jan 28 12:28:30 crc kubenswrapper[4685]: I0128 12:28:30.897686 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Jan 28 12:28:31 crc kubenswrapper[4685]: I0128 12:28:31.660217 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Jan 28 12:28:32 crc kubenswrapper[4685]: I0128 12:28:32.467393 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Jan 28 12:28:32 crc kubenswrapper[4685]: I0128 12:28:32.524985 4685 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Jan 28 12:28:33 crc kubenswrapper[4685]: I0128 12:28:33.091971 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Jan 28 12:28:33 crc kubenswrapper[4685]: I0128 12:28:33.314034 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Jan 28 12:28:35 crc kubenswrapper[4685]: I0128 12:28:35.206833 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Jan 28 12:28:35 crc kubenswrapper[4685]: I0128 12:28:35.570647 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Jan 28 12:28:37 crc kubenswrapper[4685]: I0128 12:28:37.687395 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Jan 28 12:28:37 crc kubenswrapper[4685]: I0128 12:28:37.932365 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Jan 28 12:28:38 crc kubenswrapper[4685]: I0128 12:28:38.883427 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Jan 28 12:28:39 crc kubenswrapper[4685]: I0128 12:28:39.125856 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib"
Jan 28 12:28:39 crc kubenswrapper[4685]: I0128 12:28:39.154203 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 28 12:28:39 crc kubenswrapper[4685]: I0128 12:28:39.527068 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Jan 28 12:28:39 crc kubenswrapper[4685]: I0128 12:28:39.546273 4685 scope.go:117] "RemoveContainer" containerID="75b2ff7b44c2c7b65da4b0e671f102bf16b58fd9c78efae0607f0d87c8b60c42"
Jan 28 12:28:39 crc kubenswrapper[4685]: I0128 12:28:39.848412 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Jan 28 12:28:40 crc kubenswrapper[4685]: I0128 12:28:40.448679 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Jan 28 12:28:40 crc kubenswrapper[4685]: I0128 12:28:40.507973 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-qpc29_68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45/marketplace-operator/3.log"
Jan 28 12:28:40 crc kubenswrapper[4685]: I0128 12:28:40.508557 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-qpc29_68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45/marketplace-operator/2.log"
Jan 28 12:28:40 crc kubenswrapper[4685]: I0128 12:28:40.508620 4685 generic.go:334] "Generic (PLEG): container finished" podID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerID="7de13053aa8d167e65054867c6ad6d63a0a169e09c0a1abe3c97432b3cf90931" exitCode=1
Jan 28 12:28:40 crc kubenswrapper[4685]: I0128 12:28:40.508654 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" event={"ID":"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45","Type":"ContainerDied","Data":"7de13053aa8d167e65054867c6ad6d63a0a169e09c0a1abe3c97432b3cf90931"}
Jan 28 12:28:40 crc kubenswrapper[4685]: I0128 12:28:40.508691 4685 scope.go:117] "RemoveContainer" containerID="75b2ff7b44c2c7b65da4b0e671f102bf16b58fd9c78efae0607f0d87c8b60c42"
Jan 28 12:28:40 crc kubenswrapper[4685]: I0128 12:28:40.509783 4685 scope.go:117] "RemoveContainer" containerID="7de13053aa8d167e65054867c6ad6d63a0a169e09c0a1abe3c97432b3cf90931"
Jan 28 12:28:40 crc kubenswrapper[4685]: E0128 12:28:40.510315 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-qpc29_openshift-marketplace(68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45)\"" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45"
Jan 28 12:28:40 crc kubenswrapper[4685]: I0128 12:28:40.620826 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Jan 28 12:28:41 crc kubenswrapper[4685]: I0128 12:28:41.279974 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Jan 28 12:28:41 crc kubenswrapper[4685]: I0128 12:28:41.518350 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-qpc29_68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45/marketplace-operator/3.log"
Jan 28 12:28:41 crc kubenswrapper[4685]: I0128 12:28:41.970489 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Jan 28 12:28:42 crc kubenswrapper[4685]: I0128 12:28:42.101609 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Jan 28 12:28:42 crc kubenswrapper[4685]: I0128 12:28:42.319386 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Jan 28 12:28:42 crc kubenswrapper[4685]: I0128 12:28:42.578567 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Jan 28 12:28:44 crc kubenswrapper[4685]: I0128 12:28:44.337630 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Jan 28 12:28:44 crc kubenswrapper[4685]: I0128 12:28:44.675823 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Jan 28 12:28:44 crc kubenswrapper[4685]: I0128 12:28:44.817225 4685 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29"
Jan 28 12:28:44 crc kubenswrapper[4685]: I0128 12:28:44.817746 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29"
Jan 28 12:28:44 crc kubenswrapper[4685]: I0128 12:28:44.819473 4685 scope.go:117] "RemoveContainer" containerID="7de13053aa8d167e65054867c6ad6d63a0a169e09c0a1abe3c97432b3cf90931"
Jan 28 12:28:44 crc kubenswrapper[4685]: E0128 12:28:44.819924 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-qpc29_openshift-marketplace(68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45)\"" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45"
Jan 28 12:28:44 crc kubenswrapper[4685]: I0128 12:28:44.965734 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Jan 28 12:28:45 crc kubenswrapper[4685]: I0128 12:28:45.956418 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Jan 28 12:28:46 crc kubenswrapper[4685]: I0128 12:28:46.769412 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Jan 28 12:28:46 crc kubenswrapper[4685]: I0128 12:28:46.913163 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Jan 28 12:28:47 crc kubenswrapper[4685]: I0128 12:28:47.218863 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Jan 28 12:28:47 crc kubenswrapper[4685]: I0128 12:28:47.444757 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Jan 28 12:28:47 crc kubenswrapper[4685]: I0128 12:28:47.579698 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Jan 28 12:28:47 crc kubenswrapper[4685]: I0128 12:28:47.814523 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Jan 28 12:28:48 crc kubenswrapper[4685]: I0128 12:28:48.317088 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Jan 28 12:28:48 crc kubenswrapper[4685]: I0128 12:28:48.654032 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Jan 28 12:28:48 crc kubenswrapper[4685]: I0128 12:28:48.780845 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Jan 28 12:28:49 crc kubenswrapper[4685]: I0128 12:28:49.073316 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Jan 28 12:28:50 crc kubenswrapper[4685]: I0128 12:28:50.024997 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Jan 28 12:28:50 crc kubenswrapper[4685]: I0128 12:28:50.074823 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Jan 28 12:28:50 crc kubenswrapper[4685]: I0128 12:28:50.166387 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Jan 28 12:28:50 crc kubenswrapper[4685]: I0128 12:28:50.379501 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Jan 28 12:28:50 crc kubenswrapper[4685]: I0128 12:28:50.594403 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Jan 28 12:28:50 crc kubenswrapper[4685]: I0128 12:28:50.612452 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Jan 28 12:28:50 crc kubenswrapper[4685]: I0128 12:28:50.660195 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Jan 28 12:28:50 crc kubenswrapper[4685]: I0128 12:28:50.810707 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Jan 28 12:28:50 crc kubenswrapper[4685]: I0128 12:28:50.931927 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Jan 28 12:28:51 crc kubenswrapper[4685]: I0128 12:28:51.100039 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Jan 28 12:28:51 crc kubenswrapper[4685]: I0128 12:28:51.300107 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Jan 28 12:28:51 crc kubenswrapper[4685]: I0128 12:28:51.752827 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Jan 28 12:28:51 crc kubenswrapper[4685]: I0128 12:28:51.838558 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Jan 28 12:28:51 crc kubenswrapper[4685]: I0128 12:28:51.931849 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Jan 28 12:28:52 crc kubenswrapper[4685]: I0128 12:28:52.350364 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Jan 28 12:28:52 crc kubenswrapper[4685]: I0128 12:28:52.483919 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Jan 28 12:28:52 crc kubenswrapper[4685]: I0128 12:28:52.491893 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Jan 28 12:28:52 crc kubenswrapper[4685]: I0128 12:28:52.882678 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Jan 28 12:28:52 crc kubenswrapper[4685]: I0128 12:28:52.883941 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Jan 28 12:28:53 crc kubenswrapper[4685]: I0128 12:28:53.215005 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Jan 28 12:28:53 crc kubenswrapper[4685]: I0128 12:28:53.740642 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Jan 28 12:28:53 crc kubenswrapper[4685]: I0128 12:28:53.816768 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Jan 28 12:28:53 crc kubenswrapper[4685]: I0128 12:28:53.823308 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Jan 28 12:28:53 crc kubenswrapper[4685]: I0128 12:28:53.841581 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Jan 28 12:28:53 crc kubenswrapper[4685]: I0128 12:28:53.970784 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Jan 28 12:28:54 crc kubenswrapper[4685]: I0128 12:28:54.318914 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Jan 28 12:28:54 crc kubenswrapper[4685]: I0128 12:28:54.596117 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-cluster-machine-approver_machine-approver-56656f9798-4h4rx_91fd9e56-9836-4427-b58e-9c0742895c7a/machine-approver-controller/0.log"
Jan 28 12:28:54 crc kubenswrapper[4685]: I0128 12:28:54.596790 4685 generic.go:334] "Generic (PLEG): container finished" podID="91fd9e56-9836-4427-b58e-9c0742895c7a" containerID="1b74a283622c2ebcb056d662da66d602d7c689d34dd83561eef92054ee9cb02e" exitCode=255
Jan 28 12:28:54 crc kubenswrapper[4685]: I0128 12:28:54.596833 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" event={"ID":"91fd9e56-9836-4427-b58e-9c0742895c7a","Type":"ContainerDied","Data":"1b74a283622c2ebcb056d662da66d602d7c689d34dd83561eef92054ee9cb02e"}
Jan 28 12:28:54 crc kubenswrapper[4685]: I0128 12:28:54.597393 4685 scope.go:117] "RemoveContainer" containerID="1b74a283622c2ebcb056d662da66d602d7c689d34dd83561eef92054ee9cb02e"
Jan 28 12:28:54 crc kubenswrapper[4685]: I0128 12:28:54.660113 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Jan 28 12:28:55 crc kubenswrapper[4685]: I0128 12:28:55.117376 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Jan 28 12:28:55 crc kubenswrapper[4685]: I0128 12:28:55.444517 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Jan 28 12:28:55 crc kubenswrapper[4685]: I0128 12:28:55.609779 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-cluster-machine-approver_machine-approver-56656f9798-4h4rx_91fd9e56-9836-4427-b58e-9c0742895c7a/machine-approver-controller/0.log"
Jan 28 12:28:55 crc kubenswrapper[4685]: I0128 12:28:55.610803 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" event={"ID":"91fd9e56-9836-4427-b58e-9c0742895c7a","Type":"ContainerStarted","Data":"b4a1452a72b69c520be8b025342201f8e52b59f5411990c5184fe76ee28f219f"}
Jan 28 12:28:56 crc kubenswrapper[4685]: I0128 12:28:56.023225 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Jan 28 12:28:56 crc kubenswrapper[4685]: I0128 12:28:56.298535 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Jan 28 12:28:56 crc kubenswrapper[4685]: I0128 12:28:56.723393 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Jan 28 12:28:56 crc kubenswrapper[4685]: I0128 12:28:56.878878 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Jan 28 12:28:57 crc kubenswrapper[4685]: I0128 12:28:57.070091 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 12:28:57 crc kubenswrapper[4685]: I0128 12:28:57.070230 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 12:28:57 crc kubenswrapper[4685]: I0128 12:28:57.070469 4685 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv"
Jan 28 12:28:57 crc kubenswrapper[4685]: I0128 12:28:57.073106 4685 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a525aa42b2fedd64e12200b250d522770d76d5b9f7da6fd1b15cf0f353da0c9d"} pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 28 12:28:57 crc kubenswrapper[4685]: I0128 12:28:57.073257 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" containerID="cri-o://a525aa42b2fedd64e12200b250d522770d76d5b9f7da6fd1b15cf0f353da0c9d" gracePeriod=600
Jan 28 12:28:57 crc kubenswrapper[4685]: I0128 12:28:57.628512 4685 generic.go:334] "Generic (PLEG): container finished" podID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerID="a525aa42b2fedd64e12200b250d522770d76d5b9f7da6fd1b15cf0f353da0c9d" exitCode=0
Jan 28 12:28:57 crc kubenswrapper[4685]: I0128 12:28:57.628586 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerDied","Data":"a525aa42b2fedd64e12200b250d522770d76d5b9f7da6fd1b15cf0f353da0c9d"}
Jan 28 12:28:57 crc kubenswrapper[4685]: I0128 12:28:57.628917 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerStarted","Data":"b08beb29fe5b7d62b07eaeb5fd852ce2c7a3d0905ce5b3b6ba6586d8fc4e3107"}
Jan 28 12:28:57 crc kubenswrapper[4685]: I0128 12:28:57.628943 4685 scope.go:117] "RemoveContainer" containerID="022ecbc0501c89d7ba051cca156f951a13fc0e61235b4f050c513b8134607de9"
Jan 28 12:28:57 crc kubenswrapper[4685]: I0128 12:28:57.630407 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Jan 28 12:28:57 crc kubenswrapper[4685]: I0128 12:28:57.642796 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Jan 28 12:28:58 crc kubenswrapper[4685]: I0128 12:28:58.290104 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Jan 28 12:28:58 crc kubenswrapper[4685]: I0128 12:28:58.929765 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Jan 28 12:28:59 crc kubenswrapper[4685]: I0128 12:28:59.009053 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Jan 28 12:28:59 crc kubenswrapper[4685]: I0128 12:28:59.439029 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Jan 28 12:28:59 crc kubenswrapper[4685]: I0128 12:28:59.545878 4685 scope.go:117] "RemoveContainer" containerID="7de13053aa8d167e65054867c6ad6d63a0a169e09c0a1abe3c97432b3cf90931"
Jan 28 12:28:59 crc kubenswrapper[4685]: E0128 12:28:59.546666 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-qpc29_openshift-marketplace(68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45)\"" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45"
Jan 28 12:28:59 crc kubenswrapper[4685]: I0128 12:28:59.644159 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Jan 28 12:28:59 crc kubenswrapper[4685]: I0128 12:28:59.781578 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Jan 28 12:28:59 crc kubenswrapper[4685]: I0128 12:28:59.781597 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Jan 28 12:29:00 crc kubenswrapper[4685]: I0128 12:29:00.113854 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Jan 28 12:29:00 crc kubenswrapper[4685]: I0128 12:29:00.288023 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Jan 28 12:29:00 crc kubenswrapper[4685]: I0128 12:29:00.403388 4685 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Jan 28 12:29:00 crc kubenswrapper[4685]: I0128 12:29:00.540878 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Jan 28 12:29:00 crc kubenswrapper[4685]: I0128 12:29:00.659264 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Jan 28 12:29:00 crc kubenswrapper[4685]: I0128 12:29:00.761283 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Jan 28 12:29:00 crc kubenswrapper[4685]: I0128 12:29:00.774634 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Jan 28 12:29:00 crc kubenswrapper[4685]: I0128 12:29:00.899769 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Jan 28 12:29:01 crc kubenswrapper[4685]: I0128 12:29:01.271547 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Jan 28 12:29:01 crc kubenswrapper[4685]: I0128 12:29:01.944418 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Jan 28 12:29:02 crc kubenswrapper[4685]: I0128 12:29:02.275661 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Jan 28 12:29:02 crc kubenswrapper[4685]: I0128 12:29:02.534953 4685 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Jan 28 12:29:02 crc kubenswrapper[4685]: I0128 12:29:02.636982 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Jan 28 12:29:02 crc kubenswrapper[4685]: I0128 12:29:02.922778 4685 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Jan 28 12:29:03 crc kubenswrapper[4685]: I0128 12:29:03.457350 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Jan 28 12:29:03 crc kubenswrapper[4685]: I0128 12:29:03.519706 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Jan 28 12:29:03 crc kubenswrapper[4685]: I0128 12:29:03.576406 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Jan 28 12:29:03 crc kubenswrapper[4685]: I0128 12:29:03.958092 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Jan 28 12:29:04 crc kubenswrapper[4685]: I0128 12:29:04.048526 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Jan 28 12:29:04 crc kubenswrapper[4685]: I0128 12:29:04.100837 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Jan 28 12:29:04 crc kubenswrapper[4685]: I0128 12:29:04.316161 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Jan 28 12:29:04 crc kubenswrapper[4685]: I0128 12:29:04.455191 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Jan 28 12:29:04 crc kubenswrapper[4685]: I0128 12:29:04.793977 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Jan 28 12:29:05 crc kubenswrapper[4685]: I0128 12:29:05.448292 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Jan 28 12:29:05 crc kubenswrapper[4685]: I0128 12:29:05.501112 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Jan 28 12:29:05 crc kubenswrapper[4685]: I0128 12:29:05.711845 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Jan 28 12:29:05 crc kubenswrapper[4685]: I0128 12:29:05.806314 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Jan 28 12:29:06 crc kubenswrapper[4685]: I0128 12:29:06.143026 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Jan 28 12:29:06 crc kubenswrapper[4685]: I0128 12:29:06.144214 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Jan 28 12:29:06 crc kubenswrapper[4685]: I0128 12:29:06.246328 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Jan 28 12:29:06 crc kubenswrapper[4685]: I0128 12:29:06.604575 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Jan 28 12:29:06 crc kubenswrapper[4685]: I0128 12:29:06.766668 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt"
Jan 28 12:29:06 crc kubenswrapper[4685]: I0128 12:29:06.790135 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Jan 28 12:29:06 crc kubenswrapper[4685]: I0128 12:29:06.883405 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Jan 28 12:29:06 crc kubenswrapper[4685]: I0128 12:29:06.903271 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Jan 28 12:29:07 crc kubenswrapper[4685]: I0128 12:29:07.288816 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Jan 28 12:29:07 crc kubenswrapper[4685]: I0128 12:29:07.379599 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Jan 28 12:29:07 crc kubenswrapper[4685]: I0128 12:29:07.768728 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Jan 28 12:29:07 crc kubenswrapper[4685]: I0128 12:29:07.826039 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Jan 28 12:29:08 crc kubenswrapper[4685]: I0128 12:29:08.186351 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Jan 28 12:29:08 crc kubenswrapper[4685]: I0128 12:29:08.318799 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Jan 28 12:29:08 crc kubenswrapper[4685]: I0128 12:29:08.660566 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Jan 28 12:29:08 crc kubenswrapper[4685]: I0128 12:29:08.706374 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Jan 28 12:29:08 crc kubenswrapper[4685]: I0128 12:29:08.747120 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Jan 28 12:29:08 crc kubenswrapper[4685]: I0128 12:29:08.980857 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Jan 28 12:29:09 crc kubenswrapper[4685]: I0128 12:29:09.059787 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Jan 28 12:29:09 crc kubenswrapper[4685]: I0128 12:29:09.115714 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Jan 28 12:29:09 crc kubenswrapper[4685]: I0128 12:29:09.239047 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Jan 28 12:29:09 crc kubenswrapper[4685]: I0128 12:29:09.586105 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Jan 28 12:29:09 crc kubenswrapper[4685]: I0128 12:29:09.979286 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Jan 28 12:29:10 crc kubenswrapper[4685]: I0128 12:29:10.375571 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Jan 28 12:29:10 crc kubenswrapper[4685]: I0128 12:29:10.375587 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Jan 28 12:29:10 crc kubenswrapper[4685]: I0128 12:29:10.476813 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Jan 28 12:29:10 crc kubenswrapper[4685]: I0128 12:29:10.659634 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Jan 28 12:29:10 crc kubenswrapper[4685]: I0128 12:29:10.924653 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Jan 28 12:29:10 crc kubenswrapper[4685]: I0128 12:29:10.974877 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Jan 28 12:29:11 crc kubenswrapper[4685]: I0128 12:29:11.035898 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Jan 28 12:29:11 crc kubenswrapper[4685]: I0128 12:29:11.182061 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Jan 28 12:29:11 crc kubenswrapper[4685]: I0128 12:29:11.203730 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Jan 28 12:29:11 crc kubenswrapper[4685]: I0128 12:29:11.358140 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Jan 28 12:29:11 crc kubenswrapper[4685]: I0128 12:29:11.675781 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Jan 28 12:29:12 crc kubenswrapper[4685]: I0128 12:29:12.195361 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Jan 28 12:29:12 crc kubenswrapper[4685]: I0128 12:29:12.635685 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Jan 28 12:29:12 crc kubenswrapper[4685]: I0128 12:29:12.748156 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Jan 28 12:29:12 crc kubenswrapper[4685]: I0128 12:29:12.791795 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Jan 28 12:29:12 crc kubenswrapper[4685]: I0128 12:29:12.891779 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Jan 28 12:29:12 crc kubenswrapper[4685]: I0128 12:29:12.987389 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Jan 28 12:29:13 crc kubenswrapper[4685]: I0128 12:29:13.046916 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Jan 28 12:29:13 crc kubenswrapper[4685]: I0128 12:29:13.373144 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Jan 28 12:29:13 crc kubenswrapper[4685]: I0128 12:29:13.500958 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Jan 28 12:29:13 crc kubenswrapper[4685]: I0128 12:29:13.532620 4685 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Jan 28 12:29:13 crc kubenswrapper[4685]: I0128 12:29:13.546521 4685 scope.go:117] "RemoveContainer" containerID="7de13053aa8d167e65054867c6ad6d63a0a169e09c0a1abe3c97432b3cf90931"
Jan 28 12:29:13 crc kubenswrapper[4685]: E0128 12:29:13.546986 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-qpc29_openshift-marketplace(68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45)\"" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45"
Jan 28 12:29:13 crc kubenswrapper[4685]: I0128 12:29:13.681739 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Jan 28 12:29:13 crc kubenswrapper[4685]: I0128 12:29:13.696687 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Jan 28 12:29:13 crc kubenswrapper[4685]: I0128 12:29:13.719395 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Jan 28 12:29:13 crc kubenswrapper[4685]: I0128 12:29:13.900225 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Jan 28 12:29:13 crc kubenswrapper[4685]: I0128 12:29:13.910784 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Jan 28 12:29:13 crc kubenswrapper[4685]: I0128 12:29:13.921676 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Jan 28 12:29:14 crc kubenswrapper[4685]: I0128 12:29:14.595109 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Jan 28 12:29:14 crc kubenswrapper[4685]: I0128 12:29:14.746276 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Jan 28 12:29:14 crc kubenswrapper[4685]: I0128 12:29:14.780903 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Jan 28 12:29:14 crc kubenswrapper[4685]: I0128 12:29:14.796415 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Jan 28 12:29:14 crc kubenswrapper[4685]: I0128 12:29:14.936963 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl"
Jan 28 12:29:15 crc kubenswrapper[4685]: I0128 12:29:15.053626 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Jan 28 12:29:15 crc kubenswrapper[4685]: I0128 12:29:15.094956 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Jan 28 12:29:15 crc kubenswrapper[4685]: I0128 12:29:15.144343 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Jan 28 12:29:15 crc kubenswrapper[4685]: I0128 12:29:15.334224 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Jan 28 12:29:15 crc kubenswrapper[4685]: I0128 12:29:15.608369 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Jan 28 12:29:15 crc kubenswrapper[4685]: I0128 12:29:15.722631 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Jan 28 12:29:15 crc kubenswrapper[4685]: I0128 12:29:15.741256 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz"
Jan 28 12:29:15 crc kubenswrapper[4685]: I0128 12:29:15.838582 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Jan 28 12:29:16 crc kubenswrapper[4685]: I0128 12:29:16.203019 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Jan 28 12:29:16 crc kubenswrapper[4685]: I0128 12:29:16.518046 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Jan 28 12:29:16 crc kubenswrapper[4685]: I0128 12:29:16.783541 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Jan 28 12:29:17 crc kubenswrapper[4685]: I0128 12:29:17.634950 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Jan 28 12:29:17 crc kubenswrapper[4685]: I0128 12:29:17.700511 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Jan 28 12:29:18 crc kubenswrapper[4685]: I0128 12:29:18.101284 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Jan 28 12:29:18 crc kubenswrapper[4685]: I0128 12:29:18.152730 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Jan 28 12:29:18 crc kubenswrapper[4685]: I0128 12:29:18.494732 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Jan 28 12:29:18 crc kubenswrapper[4685]: I0128 12:29:18.563143 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Jan 28 12:29:19 crc kubenswrapper[4685]: I0128 12:29:19.373846 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Jan 28 12:29:19 crc kubenswrapper[4685]: I0128 12:29:19.553242 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Jan 28 12:29:19 crc kubenswrapper[4685]: I0128 12:29:19.610265 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Jan 28 12:29:19 crc kubenswrapper[4685]: I0128 12:29:19.979434 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Jan 28 12:29:20 crc kubenswrapper[4685]: I0128 12:29:20.060401 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Jan 28 12:29:20 crc kubenswrapper[4685]: I0128 12:29:20.231896 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle"
Jan 28 12:29:20 crc kubenswrapper[4685]: I0128 12:29:20.240435 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Jan 28 12:29:20 crc kubenswrapper[4685]: I0128 12:29:20.333872 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Jan 28 12:29:20 crc kubenswrapper[4685]: I0128 12:29:20.674267 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Jan 28 12:29:20 crc kubenswrapper[4685]: I0128 12:29:20.794345 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Jan 28 12:29:20 crc kubenswrapper[4685]: I0128 12:29:20.994105 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Jan 28 12:29:21 crc kubenswrapper[4685]: I0128 12:29:21.157488 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Jan 28 12:29:21 crc kubenswrapper[4685]: I0128 12:29:21.424157 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Jan 28 12:29:22 crc kubenswrapper[4685]: I0128 12:29:22.043859 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Jan 28 12:29:22 crc kubenswrapper[4685]: I0128 12:29:22.101620 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Jan 28 12:29:22 crc kubenswrapper[4685]: I0128 12:29:22.458756 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Jan 28 12:29:22 crc kubenswrapper[4685]: I0128 12:29:22.544651 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Jan 28 12:29:22 crc kubenswrapper[4685]: I0128 12:29:22.571964 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Jan 28 12:29:22 crc kubenswrapper[4685]: I0128 12:29:22.649316 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Jan 28 12:29:22 crc kubenswrapper[4685]: I0128 12:29:22.663920 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Jan 28 12:29:23 crc kubenswrapper[4685]: I0128 12:29:23.036293 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Jan 28 12:29:23 crc kubenswrapper[4685]: I0128 12:29:23.131020 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Jan 28 12:29:23 crc kubenswrapper[4685]: I0128 12:29:23.225126 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Jan 28 12:29:23 crc kubenswrapper[4685]: I0128 12:29:23.546488 4685 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="58bc04bb-fcb0-42e3-8225-369bc0272d41"
Jan 28 12:29:23 crc kubenswrapper[4685]: I0128 12:29:23.546536 4685 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="58bc04bb-fcb0-42e3-8225-369bc0272d41"
Jan 28 12:29:23 crc kubenswrapper[4685]: I0128 12:29:23.551666 4685 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="6decb284-ec76-47d0-85eb-807d2d91a68e"
Jan 28 12:29:23 crc kubenswrapper[4685]: I0128 12:29:23.552563 4685 status_manager.go:308] "Container readiness changed before pod has synced" pod="openshift-kube-apiserver/kube-apiserver-crc" containerID="cri-o://c4c262ec97917cbaa49589c64f1e981e271bb301c6c40ba195a5b13b05c8b0cc"
Jan 28 12:29:23 crc kubenswrapper[4685]: I0128 12:29:23.552598 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 12:29:23 crc kubenswrapper[4685]: I0128 12:29:23.821955 4685 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="58bc04bb-fcb0-42e3-8225-369bc0272d41"
Jan 28 12:29:23 crc kubenswrapper[4685]: I0128 12:29:23.821992 4685 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="58bc04bb-fcb0-42e3-8225-369bc0272d41"
Jan 28 12:29:23 crc kubenswrapper[4685]: I0128 12:29:23.825314 4685 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="6decb284-ec76-47d0-85eb-807d2d91a68e"
Jan 28 12:29:24 crc kubenswrapper[4685]: I0128 12:29:24.021474 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Jan 28 12:29:24 crc kubenswrapper[4685]: I0128 12:29:24.196463 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Jan 28 12:29:24 crc kubenswrapper[4685]: I0128 12:29:24.252827 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Jan 28 12:29:24 crc kubenswrapper[4685]: I0128 12:29:24.627813 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Jan 28 12:29:24 crc kubenswrapper[4685]: I0128 12:29:24.868021 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Jan 28 12:29:24 crc kubenswrapper[4685]: I0128 12:29:24.888118 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Jan 28 12:29:24 crc kubenswrapper[4685]: I0128 12:29:24.972556 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Jan 28 12:29:25 crc kubenswrapper[4685]: I0128 12:29:25.049582 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Jan 28 12:29:25 crc kubenswrapper[4685]: I0128 12:29:25.199648 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Jan 28 12:29:25 crc kubenswrapper[4685]: I0128 12:29:25.251141 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Jan 28 12:29:25 crc kubenswrapper[4685]: I0128 12:29:25.262603 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Jan 28 12:29:25 crc kubenswrapper[4685]: I0128 12:29:25.360370 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Jan 28 12:29:25 crc kubenswrapper[4685]: I0128 12:29:25.546025 4685 scope.go:117] "RemoveContainer" containerID="7de13053aa8d167e65054867c6ad6d63a0a169e09c0a1abe3c97432b3cf90931"
Jan 28 12:29:25 crc kubenswrapper[4685]: I0128 12:29:25.551372 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Jan 28 12:29:25 crc kubenswrapper[4685]: I0128 12:29:25.844008 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-cluster-machine-approver_machine-approver-56656f9798-4h4rx_91fd9e56-9836-4427-b58e-9c0742895c7a/machine-approver-controller/1.log"
Jan 28 12:29:25 crc kubenswrapper[4685]: I0128 12:29:25.845685 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-cluster-machine-approver_machine-approver-56656f9798-4h4rx_91fd9e56-9836-4427-b58e-9c0742895c7a/machine-approver-controller/0.log"
Jan 28 12:29:25 crc kubenswrapper[4685]: I0128 12:29:25.846709 4685 generic.go:334] "Generic (PLEG): container finished" podID="91fd9e56-9836-4427-b58e-9c0742895c7a" containerID="b4a1452a72b69c520be8b025342201f8e52b59f5411990c5184fe76ee28f219f" exitCode=255
Jan 28 12:29:25 crc kubenswrapper[4685]: I0128 12:29:25.846816 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" event={"ID":"91fd9e56-9836-4427-b58e-9c0742895c7a","Type":"ContainerDied","Data":"b4a1452a72b69c520be8b025342201f8e52b59f5411990c5184fe76ee28f219f"}
Jan 28 12:29:25 crc kubenswrapper[4685]: I0128 12:29:25.846918 4685 scope.go:117] "RemoveContainer" containerID="1b74a283622c2ebcb056d662da66d602d7c689d34dd83561eef92054ee9cb02e"
Jan 28 12:29:25 crc kubenswrapper[4685]: I0128 12:29:25.848920 4685 scope.go:117] "RemoveContainer" containerID="b4a1452a72b69c520be8b025342201f8e52b59f5411990c5184fe76ee28f219f"
Jan 28 12:29:25 crc kubenswrapper[4685]: E0128 12:29:25.849614 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-approver-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=machine-approver-controller pod=machine-approver-56656f9798-4h4rx_openshift-cluster-machine-approver(91fd9e56-9836-4427-b58e-9c0742895c7a)\"" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" podUID="91fd9e56-9836-4427-b58e-9c0742895c7a"
Jan 28 12:29:26 crc kubenswrapper[4685]: I0128 12:29:26.103948 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Jan 28 12:29:26 crc kubenswrapper[4685]: I0128 12:29:26.685816 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Jan 28 12:29:26 crc kubenswrapper[4685]: I0128 12:29:26.855955 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-cluster-machine-approver_machine-approver-56656f9798-4h4rx_91fd9e56-9836-4427-b58e-9c0742895c7a/machine-approver-controller/1.log"
Jan 28 12:29:26 crc kubenswrapper[4685]: I0128 12:29:26.860754 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-qpc29_68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45/marketplace-operator/3.log"
Jan 28 12:29:26 crc kubenswrapper[4685]: I0128 12:29:26.860902 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" event={"ID":"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45","Type":"ContainerStarted","Data":"e930805a53fabf004af5c8560f837c99b859d572f1018add50bd973e93fb06f1"}
Jan 28 12:29:26 crc kubenswrapper[4685]: I0128 12:29:26.861477 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29"
Jan 28 12:29:26 crc kubenswrapper[4685]: I0128 12:29:26.865517 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29"
Jan 28 12:29:26 crc kubenswrapper[4685]: I0128 12:29:26.865553 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-jbtd9_0aa6b49a-8078-44f4-b1a9-2542d5bad461/control-plane-machine-set-operator/1.log"
Jan 28 12:29:26 crc kubenswrapper[4685]: I0128 12:29:26.866684 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-jbtd9_0aa6b49a-8078-44f4-b1a9-2542d5bad461/control-plane-machine-set-operator/0.log"
Jan 28 12:29:26 crc kubenswrapper[4685]: I0128 12:29:26.866780 4685 generic.go:334] "Generic (PLEG): container finished" podID="0aa6b49a-8078-44f4-b1a9-2542d5bad461" containerID="5f6f5ce608a8579961e3cee2b2a7aac798708f1b8cc73332fdde6627c9c54f89" exitCode=255
Jan 28 12:29:26 crc kubenswrapper[4685]: I0128 12:29:26.866840 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jbtd9" event={"ID":"0aa6b49a-8078-44f4-b1a9-2542d5bad461","Type":"ContainerDied","Data":"5f6f5ce608a8579961e3cee2b2a7aac798708f1b8cc73332fdde6627c9c54f89"}
Jan 28 12:29:26 crc kubenswrapper[4685]: I0128 12:29:26.866934 4685 scope.go:117] "RemoveContainer" containerID="a19cc0691543a9561ca9cb11f863d04005f966847556e2328724e6f08273f7f4"
Jan 28 12:29:26 crc kubenswrapper[4685]: I0128 12:29:26.867720 4685 scope.go:117] "RemoveContainer" containerID="5f6f5ce608a8579961e3cee2b2a7aac798708f1b8cc73332fdde6627c9c54f89"
Jan 28 12:29:26 crc kubenswrapper[4685]: E0128 12:29:26.868119 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"control-plane-machine-set-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=control-plane-machine-set-operator pod=control-plane-machine-set-operator-78cbb6b69f-jbtd9_openshift-machine-api(0aa6b49a-8078-44f4-b1a9-2542d5bad461)\"" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jbtd9" podUID="0aa6b49a-8078-44f4-b1a9-2542d5bad461"
Jan 28 12:29:26 crc kubenswrapper[4685]: I0128 12:29:26.981685 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Jan 28 12:29:26 crc kubenswrapper[4685]: I0128 12:29:26.997572 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Jan 28 12:29:27 crc kubenswrapper[4685]: I0128 12:29:27.371949 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Jan 28 12:29:27 crc kubenswrapper[4685]: I0128 12:29:27.504081 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Jan 28 12:29:27 crc kubenswrapper[4685]: I0128 12:29:27.876022 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-jbtd9_0aa6b49a-8078-44f4-b1a9-2542d5bad461/control-plane-machine-set-operator/1.log"
Jan 28 12:29:28 crc kubenswrapper[4685]: I0128 12:29:28.116221 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Jan 28 12:29:29 crc kubenswrapper[4685]: I0128 12:29:29.248504 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Jan 28 12:29:30 crc kubenswrapper[4685]: I0128 12:29:30.178804 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Jan 28 12:29:30 crc kubenswrapper[4685]: I0128 12:29:30.225960 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt"
Jan 28 12:29:31 crc kubenswrapper[4685]: I0128 12:29:31.037982 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Jan 28 12:29:31 crc kubenswrapper[4685]: I0128 12:29:31.220053 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Jan 28 12:29:31 crc kubenswrapper[4685]: I0128 12:29:31.945192 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Jan 28 12:29:32 crc kubenswrapper[4685]: I0128 12:29:32.726543 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Jan 28 12:29:33 crc kubenswrapper[4685]: I0128 12:29:33.025019 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Jan 28 12:29:33 crc kubenswrapper[4685]: I0128 12:29:33.287082 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Jan 28 12:29:33 crc kubenswrapper[4685]: I0128 12:29:33.601614 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Jan 28 12:29:33 crc kubenswrapper[4685]: I0128 12:29:33.819569 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Jan 28 12:29:35 crc kubenswrapper[4685]: I0128 12:29:35.254892 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Jan 28 12:29:36 crc kubenswrapper[4685]: I0128 12:29:36.223139 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Jan 28 12:29:38 crc kubenswrapper[4685]: I0128 12:29:38.653919 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Jan 28 12:29:38 crc kubenswrapper[4685]: I0128 12:29:38.738827 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Jan 28 12:29:39 crc kubenswrapper[4685]: I0128 12:29:39.172902 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Jan 28 12:29:39 crc kubenswrapper[4685]: I0128 12:29:39.545988 4685 scope.go:117] "RemoveContainer" containerID="5f6f5ce608a8579961e3cee2b2a7aac798708f1b8cc73332fdde6627c9c54f89"
Jan 28 12:29:39 crc kubenswrapper[4685]: I0128 12:29:39.568616 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Jan 28 12:29:39 crc kubenswrapper[4685]: I0128 12:29:39.953640 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-jbtd9_0aa6b49a-8078-44f4-b1a9-2542d5bad461/control-plane-machine-set-operator/1.log"
Jan 28 12:29:39 crc kubenswrapper[4685]: I0128 12:29:39.953721 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jbtd9" event={"ID":"0aa6b49a-8078-44f4-b1a9-2542d5bad461","Type":"ContainerStarted","Data":"cd4dad06e526566526efd7560249393ed8aaa5501746a8bc0d7bbb1fd28ab791"}
Jan 28 12:29:40 crc kubenswrapper[4685]: I0128 12:29:40.156081 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Jan 28 12:29:40 crc kubenswrapper[4685]: I0128 12:29:40.565217 4685 scope.go:117] "RemoveContainer" containerID="b4a1452a72b69c520be8b025342201f8e52b59f5411990c5184fe76ee28f219f"
Jan 28 12:29:41 crc kubenswrapper[4685]: I0128 12:29:41.965153 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-cluster-machine-approver_machine-approver-56656f9798-4h4rx_91fd9e56-9836-4427-b58e-9c0742895c7a/machine-approver-controller/1.log"
Jan 28 12:29:41 crc kubenswrapper[4685]: I0128 12:29:41.966253 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4h4rx" event={"ID":"91fd9e56-9836-4427-b58e-9c0742895c7a","Type":"ContainerStarted","Data":"9f2769851424f5acdee704000b39fe20daf4c3642b1df20071ec159417462b6a"}
Jan 28 12:29:44 crc kubenswrapper[4685]: I0128 12:29:44.057610 4685 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Jan 28 12:29:44 crc kubenswrapper[4685]: I0128 12:29:44.059303 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wl9lt" podStartSLOduration=122.107617783 podStartE2EDuration="6m30.059274445s" podCreationTimestamp="2026-01-28 12:23:14 +0000 UTC" firstStartedPulling="2026-01-28 12:23:23.026331992 +0000 UTC m=+154.113745827" lastFinishedPulling="2026-01-28 12:27:50.977988654 +0000 UTC m=+422.065402489" observedRunningTime="2026-01-28 12:27:57.156461757 +0000 UTC m=+428.243875592" watchObservedRunningTime="2026-01-28 12:29:44.059274445 +0000 UTC m=+535.146688310"
Jan 28 12:29:44 crc kubenswrapper[4685]: I0128 12:29:44.059980 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=201.059971305 podStartE2EDuration="3m21.059971305s" podCreationTimestamp="2026-01-28 12:26:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:27:56.864324687 +0000 UTC m=+427.951738532" watchObservedRunningTime="2026-01-28 12:29:44.059971305 +0000 UTC m=+535.147385180"
Jan 28 12:29:44 crc kubenswrapper[4685]: I0128 12:29:44.060070 4685 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openshift-marketplace/redhat-marketplace-8cf72" podStartSLOduration=124.083023645 podStartE2EDuration="6m32.060064187s" podCreationTimestamp="2026-01-28 12:23:12 +0000 UTC" firstStartedPulling="2026-01-28 12:23:22.019022298 +0000 UTC m=+153.106436173" lastFinishedPulling="2026-01-28 12:27:49.99606288 +0000 UTC m=+421.083476715" observedRunningTime="2026-01-28 12:27:57.080204236 +0000 UTC m=+428.167618081" watchObservedRunningTime="2026-01-28 12:29:44.060064187 +0000 UTC m=+535.147478062" Jan 28 12:29:44 crc kubenswrapper[4685]: I0128 12:29:44.060449 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ntg69" podStartSLOduration=121.786511192 podStartE2EDuration="6m33.060441738s" podCreationTimestamp="2026-01-28 12:23:11 +0000 UTC" firstStartedPulling="2026-01-28 12:23:21.008676011 +0000 UTC m=+152.096089846" lastFinishedPulling="2026-01-28 12:27:52.282606557 +0000 UTC m=+423.370020392" observedRunningTime="2026-01-28 12:27:57.108411916 +0000 UTC m=+428.195825751" watchObservedRunningTime="2026-01-28 12:29:44.060441738 +0000 UTC m=+535.147855613" Jan 28 12:29:44 crc kubenswrapper[4685]: I0128 12:29:44.061659 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-gvfhc" podStartSLOduration=124.511326705 podStartE2EDuration="6m33.061645963s" podCreationTimestamp="2026-01-28 12:23:11 +0000 UTC" firstStartedPulling="2026-01-28 12:23:22.019580065 +0000 UTC m=+153.106993930" lastFinishedPulling="2026-01-28 12:27:50.569899353 +0000 UTC m=+421.657313188" observedRunningTime="2026-01-28 12:27:57.065543885 +0000 UTC m=+428.152957740" watchObservedRunningTime="2026-01-28 12:29:44.061645963 +0000 UTC m=+535.149059838" Jan 28 12:29:44 crc kubenswrapper[4685]: I0128 12:29:44.063396 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-924cb" podStartSLOduration=124.280253943 podStartE2EDuration="6m34.063384402s" podCreationTimestamp="2026-01-28 12:23:10 +0000 UTC" firstStartedPulling="2026-01-28 12:23:22.018934795 +0000 UTC m=+153.106348630" lastFinishedPulling="2026-01-28 12:27:51.802065254 +0000 UTC m=+422.889479089" observedRunningTime="2026-01-28 12:27:57.042731951 +0000 UTC m=+428.130145786" watchObservedRunningTime="2026-01-28 12:29:44.063384402 +0000 UTC m=+535.150798277" Jan 28 12:29:44 crc kubenswrapper[4685]: I0128 12:29:44.064302 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9q8q2" podStartSLOduration=123.2605934 podStartE2EDuration="6m34.064291959s" podCreationTimestamp="2026-01-28 12:23:10 +0000 UTC" firstStartedPulling="2026-01-28 12:23:18.99001732 +0000 UTC m=+150.077431155" lastFinishedPulling="2026-01-28 12:27:49.793715829 +0000 UTC m=+420.881129714" observedRunningTime="2026-01-28 12:27:57.137392669 +0000 UTC m=+428.224806514" watchObservedRunningTime="2026-01-28 12:29:44.064291959 +0000 UTC m=+535.151705834" Jan 28 12:29:44 crc kubenswrapper[4685]: I0128 12:29:44.064650 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5zhxv" podStartSLOduration=120.431466531 podStartE2EDuration="6m30.064643709s" podCreationTimestamp="2026-01-28 12:23:14 +0000 UTC" firstStartedPulling="2026-01-28 12:23:22.015923442 +0000 UTC m=+153.103337287" lastFinishedPulling="2026-01-28 12:27:51.64910063 +0000 UTC m=+422.736514465" 
observedRunningTime="2026-01-28 12:27:57.17189353 +0000 UTC m=+428.259307405" watchObservedRunningTime="2026-01-28 12:29:44.064643709 +0000 UTC m=+535.152057584" Jan 28 12:29:44 crc kubenswrapper[4685]: I0128 12:29:44.065208 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-smscz" podStartSLOduration=121.778115476 podStartE2EDuration="6m31.065167424s" podCreationTimestamp="2026-01-28 12:23:13 +0000 UTC" firstStartedPulling="2026-01-28 12:23:22.019801482 +0000 UTC m=+153.107215327" lastFinishedPulling="2026-01-28 12:27:51.30685344 +0000 UTC m=+422.394267275" observedRunningTime="2026-01-28 12:27:57.095091754 +0000 UTC m=+428.182505589" watchObservedRunningTime="2026-01-28 12:29:44.065167424 +0000 UTC m=+535.152581309" Jan 28 12:29:44 crc kubenswrapper[4685]: I0128 12:29:44.066507 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 28 12:29:44 crc kubenswrapper[4685]: I0128 12:29:44.066580 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 28 12:29:44 crc kubenswrapper[4685]: I0128 12:29:44.096734 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=107.096710419 podStartE2EDuration="1m47.096710419s" podCreationTimestamp="2026-01-28 12:27:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:29:44.092596451 +0000 UTC m=+535.180010326" watchObservedRunningTime="2026-01-28 12:29:44.096710419 +0000 UTC m=+535.184124254" Jan 28 12:29:46 crc kubenswrapper[4685]: I0128 12:29:46.521772 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 28 12:29:50 crc kubenswrapper[4685]: I0128 12:29:50.136898 4685 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 28 12:29:50 crc kubenswrapper[4685]: I0128 12:29:50.137528 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://c0b1b466f22927e863a21329223f1951e5b7c493db7c302c1577d4bb3c098afa" gracePeriod=5 Jan 28 12:29:55 crc kubenswrapper[4685]: I0128 12:29:55.878532 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 28 12:29:55 crc kubenswrapper[4685]: I0128 12:29:55.879103 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:29:55 crc kubenswrapper[4685]: I0128 12:29:55.990758 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 28 12:29:55 crc kubenswrapper[4685]: I0128 12:29:55.990833 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 28 12:29:55 crc kubenswrapper[4685]: I0128 12:29:55.990858 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 28 12:29:55 crc kubenswrapper[4685]: I0128 12:29:55.990884 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 28 12:29:55 crc kubenswrapper[4685]: I0128 12:29:55.990917 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:29:55 crc kubenswrapper[4685]: I0128 12:29:55.990943 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:29:55 crc kubenswrapper[4685]: I0128 12:29:55.990979 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:29:55 crc kubenswrapper[4685]: I0128 12:29:55.990962 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 28 12:29:55 crc kubenswrapper[4685]: I0128 12:29:55.991022 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:29:55 crc kubenswrapper[4685]: I0128 12:29:55.991430 4685 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 28 12:29:55 crc kubenswrapper[4685]: I0128 12:29:55.991458 4685 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 28 12:29:55 crc kubenswrapper[4685]: I0128 12:29:55.991469 4685 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 28 12:29:55 crc kubenswrapper[4685]: I0128 12:29:55.991480 4685 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 28 12:29:56 crc kubenswrapper[4685]: I0128 12:29:56.012392 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:29:56 crc kubenswrapper[4685]: I0128 12:29:56.080120 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 28 12:29:56 crc kubenswrapper[4685]: I0128 12:29:56.080740 4685 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="c0b1b466f22927e863a21329223f1951e5b7c493db7c302c1577d4bb3c098afa" exitCode=137 Jan 28 12:29:56 crc kubenswrapper[4685]: I0128 12:29:56.080880 4685 scope.go:117] "RemoveContainer" containerID="c0b1b466f22927e863a21329223f1951e5b7c493db7c302c1577d4bb3c098afa" Jan 28 12:29:56 crc kubenswrapper[4685]: I0128 12:29:56.081071 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 12:29:56 crc kubenswrapper[4685]: I0128 12:29:56.092019 4685 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 28 12:29:56 crc kubenswrapper[4685]: I0128 12:29:56.095654 4685 scope.go:117] "RemoveContainer" containerID="c0b1b466f22927e863a21329223f1951e5b7c493db7c302c1577d4bb3c098afa" Jan 28 12:29:56 crc kubenswrapper[4685]: E0128 12:29:56.095967 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c0b1b466f22927e863a21329223f1951e5b7c493db7c302c1577d4bb3c098afa\": container with ID starting with c0b1b466f22927e863a21329223f1951e5b7c493db7c302c1577d4bb3c098afa not found: ID does not exist" containerID="c0b1b466f22927e863a21329223f1951e5b7c493db7c302c1577d4bb3c098afa" Jan 28 12:29:56 crc kubenswrapper[4685]: I0128 12:29:56.096005 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0b1b466f22927e863a21329223f1951e5b7c493db7c302c1577d4bb3c098afa"} err="failed to get container status \"c0b1b466f22927e863a21329223f1951e5b7c493db7c302c1577d4bb3c098afa\": rpc error: code = NotFound desc = could not find container \"c0b1b466f22927e863a21329223f1951e5b7c493db7c302c1577d4bb3c098afa\": container with ID starting with c0b1b466f22927e863a21329223f1951e5b7c493db7c302c1577d4bb3c098afa not found: ID does not exist" Jan 28 12:29:56 crc kubenswrapper[4685]: I0128 12:29:56.555869 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 28 12:29:56 crc kubenswrapper[4685]: I0128 12:29:56.556802 4685 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Jan 28 12:29:56 crc kubenswrapper[4685]: I0128 12:29:56.568409 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 28 12:29:56 crc kubenswrapper[4685]: I0128 12:29:56.568453 4685 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="7de741fb-72d1-438c-bade-6eaef78626c3" Jan 28 12:29:56 crc kubenswrapper[4685]: I0128 12:29:56.572759 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 28 12:29:56 crc kubenswrapper[4685]: I0128 12:29:56.572797 4685 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="7de741fb-72d1-438c-bade-6eaef78626c3" Jan 28 12:30:00 crc kubenswrapper[4685]: I0128 12:30:00.203021 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493390-9fkpt"] Jan 28 12:30:00 crc kubenswrapper[4685]: E0128 12:30:00.203552 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 28 12:30:00 crc kubenswrapper[4685]: I0128 12:30:00.203569 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 28 12:30:00 crc kubenswrapper[4685]: E0128 12:30:00.203586 
4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" containerName="installer" Jan 28 12:30:00 crc kubenswrapper[4685]: I0128 12:30:00.203593 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" containerName="installer" Jan 28 12:30:00 crc kubenswrapper[4685]: I0128 12:30:00.203716 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fd0033f-a806-4189-8e54-f83b1f72f37d" containerName="installer" Jan 28 12:30:00 crc kubenswrapper[4685]: I0128 12:30:00.203731 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 28 12:30:00 crc kubenswrapper[4685]: I0128 12:30:00.204129 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493390-9fkpt" Jan 28 12:30:00 crc kubenswrapper[4685]: I0128 12:30:00.207305 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 12:30:00 crc kubenswrapper[4685]: I0128 12:30:00.207374 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 12:30:00 crc kubenswrapper[4685]: I0128 12:30:00.240068 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493390-9fkpt"] Jan 28 12:30:00 crc kubenswrapper[4685]: I0128 12:30:00.243814 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sg9vt\" (UniqueName: \"kubernetes.io/projected/282eef70-634d-443e-a385-5b96fd905fee-kube-api-access-sg9vt\") pod \"collect-profiles-29493390-9fkpt\" (UID: \"282eef70-634d-443e-a385-5b96fd905fee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493390-9fkpt" Jan 28 12:30:00 crc kubenswrapper[4685]: I0128 12:30:00.243909 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/282eef70-634d-443e-a385-5b96fd905fee-secret-volume\") pod \"collect-profiles-29493390-9fkpt\" (UID: \"282eef70-634d-443e-a385-5b96fd905fee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493390-9fkpt" Jan 28 12:30:00 crc kubenswrapper[4685]: I0128 12:30:00.243966 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/282eef70-634d-443e-a385-5b96fd905fee-config-volume\") pod \"collect-profiles-29493390-9fkpt\" (UID: \"282eef70-634d-443e-a385-5b96fd905fee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493390-9fkpt" Jan 28 12:30:00 crc kubenswrapper[4685]: I0128 12:30:00.345868 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/282eef70-634d-443e-a385-5b96fd905fee-secret-volume\") pod \"collect-profiles-29493390-9fkpt\" (UID: \"282eef70-634d-443e-a385-5b96fd905fee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493390-9fkpt" Jan 28 12:30:00 crc kubenswrapper[4685]: I0128 12:30:00.345988 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/282eef70-634d-443e-a385-5b96fd905fee-config-volume\") pod 
\"collect-profiles-29493390-9fkpt\" (UID: \"282eef70-634d-443e-a385-5b96fd905fee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493390-9fkpt" Jan 28 12:30:00 crc kubenswrapper[4685]: I0128 12:30:00.346036 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sg9vt\" (UniqueName: \"kubernetes.io/projected/282eef70-634d-443e-a385-5b96fd905fee-kube-api-access-sg9vt\") pod \"collect-profiles-29493390-9fkpt\" (UID: \"282eef70-634d-443e-a385-5b96fd905fee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493390-9fkpt" Jan 28 12:30:00 crc kubenswrapper[4685]: I0128 12:30:00.347697 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/282eef70-634d-443e-a385-5b96fd905fee-config-volume\") pod \"collect-profiles-29493390-9fkpt\" (UID: \"282eef70-634d-443e-a385-5b96fd905fee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493390-9fkpt" Jan 28 12:30:00 crc kubenswrapper[4685]: I0128 12:30:00.359622 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/282eef70-634d-443e-a385-5b96fd905fee-secret-volume\") pod \"collect-profiles-29493390-9fkpt\" (UID: \"282eef70-634d-443e-a385-5b96fd905fee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493390-9fkpt" Jan 28 12:30:00 crc kubenswrapper[4685]: I0128 12:30:00.371040 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sg9vt\" (UniqueName: \"kubernetes.io/projected/282eef70-634d-443e-a385-5b96fd905fee-kube-api-access-sg9vt\") pod \"collect-profiles-29493390-9fkpt\" (UID: \"282eef70-634d-443e-a385-5b96fd905fee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493390-9fkpt" Jan 28 12:30:00 crc kubenswrapper[4685]: I0128 12:30:00.556652 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493390-9fkpt" Jan 28 12:30:01 crc kubenswrapper[4685]: I0128 12:30:01.000485 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493390-9fkpt"] Jan 28 12:30:01 crc kubenswrapper[4685]: I0128 12:30:01.114107 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493390-9fkpt" event={"ID":"282eef70-634d-443e-a385-5b96fd905fee","Type":"ContainerStarted","Data":"5d50e9c35d71b21413627d02c1c76a9bc6fe8ea1b096dfb8490ba4f345a3a1b7"} Jan 28 12:30:02 crc kubenswrapper[4685]: I0128 12:30:02.123636 4685 generic.go:334] "Generic (PLEG): container finished" podID="282eef70-634d-443e-a385-5b96fd905fee" containerID="a928b44f1eaa8078d1ef9cea85839c7a94fbeb2347fd13284ae2055888d96e13" exitCode=0 Jan 28 12:30:02 crc kubenswrapper[4685]: I0128 12:30:02.123700 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493390-9fkpt" event={"ID":"282eef70-634d-443e-a385-5b96fd905fee","Type":"ContainerDied","Data":"a928b44f1eaa8078d1ef9cea85839c7a94fbeb2347fd13284ae2055888d96e13"} Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.101040 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-77bf78bfc6-kww85"] Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.101300 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" podUID="e52fd026-03db-4c60-bb63-d2f20b43855a" containerName="controller-manager" containerID="cri-o://2359eeda70c3434f671bacf8c563acd65dca0071b15c4d41aec5b9fa17ac67be" gracePeriod=30 Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.251374 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s"] Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.251616 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s" podUID="134521a2-eac8-4019-b5f0-847968893cb8" containerName="route-controller-manager" containerID="cri-o://b4caa62c8fe922578a05535950936738156dd348895c064399e997bad3ff2703" gracePeriod=30 Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.634069 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493390-9fkpt" Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.686454 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/282eef70-634d-443e-a385-5b96fd905fee-secret-volume\") pod \"282eef70-634d-443e-a385-5b96fd905fee\" (UID: \"282eef70-634d-443e-a385-5b96fd905fee\") " Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.686550 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sg9vt\" (UniqueName: \"kubernetes.io/projected/282eef70-634d-443e-a385-5b96fd905fee-kube-api-access-sg9vt\") pod \"282eef70-634d-443e-a385-5b96fd905fee\" (UID: \"282eef70-634d-443e-a385-5b96fd905fee\") " Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.686599 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/282eef70-634d-443e-a385-5b96fd905fee-config-volume\") pod \"282eef70-634d-443e-a385-5b96fd905fee\" (UID: \"282eef70-634d-443e-a385-5b96fd905fee\") " Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.687359 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/282eef70-634d-443e-a385-5b96fd905fee-config-volume" (OuterVolumeSpecName: "config-volume") pod "282eef70-634d-443e-a385-5b96fd905fee" (UID: "282eef70-634d-443e-a385-5b96fd905fee"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.691688 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/282eef70-634d-443e-a385-5b96fd905fee-kube-api-access-sg9vt" (OuterVolumeSpecName: "kube-api-access-sg9vt") pod "282eef70-634d-443e-a385-5b96fd905fee" (UID: "282eef70-634d-443e-a385-5b96fd905fee"). InnerVolumeSpecName "kube-api-access-sg9vt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.691807 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/282eef70-634d-443e-a385-5b96fd905fee-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "282eef70-634d-443e-a385-5b96fd905fee" (UID: "282eef70-634d-443e-a385-5b96fd905fee"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.787087 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sg9vt\" (UniqueName: \"kubernetes.io/projected/282eef70-634d-443e-a385-5b96fd905fee-kube-api-access-sg9vt\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.787414 4685 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/282eef70-634d-443e-a385-5b96fd905fee-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.787427 4685 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/282eef70-634d-443e-a385-5b96fd905fee-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.787697 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.887603 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e52fd026-03db-4c60-bb63-d2f20b43855a-proxy-ca-bundles\") pod \"e52fd026-03db-4c60-bb63-d2f20b43855a\" (UID: \"e52fd026-03db-4c60-bb63-d2f20b43855a\") " Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.887646 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e52fd026-03db-4c60-bb63-d2f20b43855a-config\") pod \"e52fd026-03db-4c60-bb63-d2f20b43855a\" (UID: \"e52fd026-03db-4c60-bb63-d2f20b43855a\") " Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.887679 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e52fd026-03db-4c60-bb63-d2f20b43855a-client-ca\") pod \"e52fd026-03db-4c60-bb63-d2f20b43855a\" (UID: \"e52fd026-03db-4c60-bb63-d2f20b43855a\") " Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.887698 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlwpg\" (UniqueName: \"kubernetes.io/projected/e52fd026-03db-4c60-bb63-d2f20b43855a-kube-api-access-nlwpg\") pod \"e52fd026-03db-4c60-bb63-d2f20b43855a\" (UID: \"e52fd026-03db-4c60-bb63-d2f20b43855a\") " Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.887751 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e52fd026-03db-4c60-bb63-d2f20b43855a-serving-cert\") pod \"e52fd026-03db-4c60-bb63-d2f20b43855a\" (UID: \"e52fd026-03db-4c60-bb63-d2f20b43855a\") " Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.888707 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e52fd026-03db-4c60-bb63-d2f20b43855a-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "e52fd026-03db-4c60-bb63-d2f20b43855a" (UID: "e52fd026-03db-4c60-bb63-d2f20b43855a"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.888753 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e52fd026-03db-4c60-bb63-d2f20b43855a-config" (OuterVolumeSpecName: "config") pod "e52fd026-03db-4c60-bb63-d2f20b43855a" (UID: "e52fd026-03db-4c60-bb63-d2f20b43855a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.888809 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e52fd026-03db-4c60-bb63-d2f20b43855a-client-ca" (OuterVolumeSpecName: "client-ca") pod "e52fd026-03db-4c60-bb63-d2f20b43855a" (UID: "e52fd026-03db-4c60-bb63-d2f20b43855a"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.891389 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e52fd026-03db-4c60-bb63-d2f20b43855a-kube-api-access-nlwpg" (OuterVolumeSpecName: "kube-api-access-nlwpg") pod "e52fd026-03db-4c60-bb63-d2f20b43855a" (UID: "e52fd026-03db-4c60-bb63-d2f20b43855a"). InnerVolumeSpecName "kube-api-access-nlwpg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.891822 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e52fd026-03db-4c60-bb63-d2f20b43855a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e52fd026-03db-4c60-bb63-d2f20b43855a" (UID: "e52fd026-03db-4c60-bb63-d2f20b43855a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.989293 4685 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e52fd026-03db-4c60-bb63-d2f20b43855a-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.989351 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nlwpg\" (UniqueName: \"kubernetes.io/projected/e52fd026-03db-4c60-bb63-d2f20b43855a-kube-api-access-nlwpg\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.989372 4685 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e52fd026-03db-4c60-bb63-d2f20b43855a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.989388 4685 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e52fd026-03db-4c60-bb63-d2f20b43855a-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:03 crc kubenswrapper[4685]: I0128 12:30:03.989407 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e52fd026-03db-4c60-bb63-d2f20b43855a-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.134939 4685 generic.go:334] "Generic (PLEG): container finished" podID="134521a2-eac8-4019-b5f0-847968893cb8" containerID="b4caa62c8fe922578a05535950936738156dd348895c064399e997bad3ff2703" exitCode=0 Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.135057 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s" event={"ID":"134521a2-eac8-4019-b5f0-847968893cb8","Type":"ContainerDied","Data":"b4caa62c8fe922578a05535950936738156dd348895c064399e997bad3ff2703"} Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.136819 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493390-9fkpt" event={"ID":"282eef70-634d-443e-a385-5b96fd905fee","Type":"ContainerDied","Data":"5d50e9c35d71b21413627d02c1c76a9bc6fe8ea1b096dfb8490ba4f345a3a1b7"} Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.136851 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493390-9fkpt" Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.136866 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5d50e9c35d71b21413627d02c1c76a9bc6fe8ea1b096dfb8490ba4f345a3a1b7" Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.139331 4685 generic.go:334] "Generic (PLEG): container finished" podID="e52fd026-03db-4c60-bb63-d2f20b43855a" containerID="2359eeda70c3434f671bacf8c563acd65dca0071b15c4d41aec5b9fa17ac67be" exitCode=0 Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.139385 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" event={"ID":"e52fd026-03db-4c60-bb63-d2f20b43855a","Type":"ContainerDied","Data":"2359eeda70c3434f671bacf8c563acd65dca0071b15c4d41aec5b9fa17ac67be"} Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.139413 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" event={"ID":"e52fd026-03db-4c60-bb63-d2f20b43855a","Type":"ContainerDied","Data":"8c40e199b77aa8a120ae8a0561235bc0dbe4a9329942a19d467e0488864ca03f"} Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.139439 4685 scope.go:117] "RemoveContainer" containerID="2359eeda70c3434f671bacf8c563acd65dca0071b15c4d41aec5b9fa17ac67be" Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.139391 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-77bf78bfc6-kww85" Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.154481 4685 scope.go:117] "RemoveContainer" containerID="f188e910ef1a7c3fb8f5a964ffc658021c16719873f1f6b0912c81b1cb8dc234" Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.174155 4685 scope.go:117] "RemoveContainer" containerID="2359eeda70c3434f671bacf8c563acd65dca0071b15c4d41aec5b9fa17ac67be" Jan 28 12:30:04 crc kubenswrapper[4685]: E0128 12:30:04.175368 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2359eeda70c3434f671bacf8c563acd65dca0071b15c4d41aec5b9fa17ac67be\": container with ID starting with 2359eeda70c3434f671bacf8c563acd65dca0071b15c4d41aec5b9fa17ac67be not found: ID does not exist" containerID="2359eeda70c3434f671bacf8c563acd65dca0071b15c4d41aec5b9fa17ac67be" Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.175408 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2359eeda70c3434f671bacf8c563acd65dca0071b15c4d41aec5b9fa17ac67be"} err="failed to get container status \"2359eeda70c3434f671bacf8c563acd65dca0071b15c4d41aec5b9fa17ac67be\": rpc error: code = NotFound desc = could not find container \"2359eeda70c3434f671bacf8c563acd65dca0071b15c4d41aec5b9fa17ac67be\": container with ID starting with 2359eeda70c3434f671bacf8c563acd65dca0071b15c4d41aec5b9fa17ac67be not found: ID does not exist" Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.175437 4685 scope.go:117] "RemoveContainer" containerID="f188e910ef1a7c3fb8f5a964ffc658021c16719873f1f6b0912c81b1cb8dc234" Jan 28 12:30:04 crc kubenswrapper[4685]: E0128 12:30:04.175621 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f188e910ef1a7c3fb8f5a964ffc658021c16719873f1f6b0912c81b1cb8dc234\": container with ID starting with 
f188e910ef1a7c3fb8f5a964ffc658021c16719873f1f6b0912c81b1cb8dc234 not found: ID does not exist" containerID="f188e910ef1a7c3fb8f5a964ffc658021c16719873f1f6b0912c81b1cb8dc234" Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.175636 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f188e910ef1a7c3fb8f5a964ffc658021c16719873f1f6b0912c81b1cb8dc234"} err="failed to get container status \"f188e910ef1a7c3fb8f5a964ffc658021c16719873f1f6b0912c81b1cb8dc234\": rpc error: code = NotFound desc = could not find container \"f188e910ef1a7c3fb8f5a964ffc658021c16719873f1f6b0912c81b1cb8dc234\": container with ID starting with f188e910ef1a7c3fb8f5a964ffc658021c16719873f1f6b0912c81b1cb8dc234 not found: ID does not exist" Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.182240 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-77bf78bfc6-kww85"] Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.185745 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-77bf78bfc6-kww85"] Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.422380 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s" Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.553000 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e52fd026-03db-4c60-bb63-d2f20b43855a" path="/var/lib/kubelet/pods/e52fd026-03db-4c60-bb63-d2f20b43855a/volumes" Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.612575 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mz4bt\" (UniqueName: \"kubernetes.io/projected/134521a2-eac8-4019-b5f0-847968893cb8-kube-api-access-mz4bt\") pod \"134521a2-eac8-4019-b5f0-847968893cb8\" (UID: \"134521a2-eac8-4019-b5f0-847968893cb8\") " Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.613044 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/134521a2-eac8-4019-b5f0-847968893cb8-config\") pod \"134521a2-eac8-4019-b5f0-847968893cb8\" (UID: \"134521a2-eac8-4019-b5f0-847968893cb8\") " Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.613097 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/134521a2-eac8-4019-b5f0-847968893cb8-client-ca\") pod \"134521a2-eac8-4019-b5f0-847968893cb8\" (UID: \"134521a2-eac8-4019-b5f0-847968893cb8\") " Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.613132 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/134521a2-eac8-4019-b5f0-847968893cb8-serving-cert\") pod \"134521a2-eac8-4019-b5f0-847968893cb8\" (UID: \"134521a2-eac8-4019-b5f0-847968893cb8\") " Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.614026 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/134521a2-eac8-4019-b5f0-847968893cb8-config" (OuterVolumeSpecName: "config") pod "134521a2-eac8-4019-b5f0-847968893cb8" (UID: "134521a2-eac8-4019-b5f0-847968893cb8"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.614386 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/134521a2-eac8-4019-b5f0-847968893cb8-client-ca" (OuterVolumeSpecName: "client-ca") pod "134521a2-eac8-4019-b5f0-847968893cb8" (UID: "134521a2-eac8-4019-b5f0-847968893cb8"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.621132 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/134521a2-eac8-4019-b5f0-847968893cb8-kube-api-access-mz4bt" (OuterVolumeSpecName: "kube-api-access-mz4bt") pod "134521a2-eac8-4019-b5f0-847968893cb8" (UID: "134521a2-eac8-4019-b5f0-847968893cb8"). InnerVolumeSpecName "kube-api-access-mz4bt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.621320 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/134521a2-eac8-4019-b5f0-847968893cb8-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "134521a2-eac8-4019-b5f0-847968893cb8" (UID: "134521a2-eac8-4019-b5f0-847968893cb8"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.713826 4685 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/134521a2-eac8-4019-b5f0-847968893cb8-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.713859 4685 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/134521a2-eac8-4019-b5f0-847968893cb8-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.713868 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mz4bt\" (UniqueName: \"kubernetes.io/projected/134521a2-eac8-4019-b5f0-847968893cb8-kube-api-access-mz4bt\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:04 crc kubenswrapper[4685]: I0128 12:30:04.713880 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/134521a2-eac8-4019-b5f0-847968893cb8-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.148360 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s" event={"ID":"134521a2-eac8-4019-b5f0-847968893cb8","Type":"ContainerDied","Data":"86ca680e03b9dc580d24b3740656c20bbc77d9e411a4229b79433455845dc59c"} Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.148425 4685 scope.go:117] "RemoveContainer" containerID="b4caa62c8fe922578a05535950936738156dd348895c064399e997bad3ff2703" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.148440 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.177374 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s"] Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.181930 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-856d844f48-4zg4s"] Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.427955 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz"] Jan 28 12:30:05 crc kubenswrapper[4685]: E0128 12:30:05.428551 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="282eef70-634d-443e-a385-5b96fd905fee" containerName="collect-profiles" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.428563 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="282eef70-634d-443e-a385-5b96fd905fee" containerName="collect-profiles" Jan 28 12:30:05 crc kubenswrapper[4685]: E0128 12:30:05.428572 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e52fd026-03db-4c60-bb63-d2f20b43855a" containerName="controller-manager" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.428579 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="e52fd026-03db-4c60-bb63-d2f20b43855a" containerName="controller-manager" Jan 28 12:30:05 crc kubenswrapper[4685]: E0128 12:30:05.428590 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="134521a2-eac8-4019-b5f0-847968893cb8" containerName="route-controller-manager" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.428598 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="134521a2-eac8-4019-b5f0-847968893cb8" containerName="route-controller-manager" Jan 28 12:30:05 crc kubenswrapper[4685]: E0128 12:30:05.428606 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e52fd026-03db-4c60-bb63-d2f20b43855a" containerName="controller-manager" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.428612 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="e52fd026-03db-4c60-bb63-d2f20b43855a" containerName="controller-manager" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.428714 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="134521a2-eac8-4019-b5f0-847968893cb8" containerName="route-controller-manager" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.428724 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="e52fd026-03db-4c60-bb63-d2f20b43855a" containerName="controller-manager" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.428736 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="e52fd026-03db-4c60-bb63-d2f20b43855a" containerName="controller-manager" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.428747 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="282eef70-634d-443e-a385-5b96fd905fee" containerName="collect-profiles" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.429100 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.430979 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.431648 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.432433 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.433955 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-76d5b7b487-7g56j"] Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.434560 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.435119 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.435928 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.436832 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.442013 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.442133 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.442452 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.442557 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.442685 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.447692 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-76d5b7b487-7g56j"] Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.447891 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.451452 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.455871 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz"] Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.624878 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnnll\" (UniqueName: 
\"kubernetes.io/projected/4a297f4a-7dae-4c97-b91c-f9f63b51e838-kube-api-access-gnnll\") pod \"controller-manager-76d5b7b487-7g56j\" (UID: \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\") " pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.624933 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4a297f4a-7dae-4c97-b91c-f9f63b51e838-serving-cert\") pod \"controller-manager-76d5b7b487-7g56j\" (UID: \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\") " pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.624966 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/639fc84c-2f84-4a92-90f8-8e83c7155bb8-client-ca\") pod \"route-controller-manager-665c9c5d9f-nwjvz\" (UID: \"639fc84c-2f84-4a92-90f8-8e83c7155bb8\") " pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.624996 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4a297f4a-7dae-4c97-b91c-f9f63b51e838-client-ca\") pod \"controller-manager-76d5b7b487-7g56j\" (UID: \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\") " pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.625022 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fb2kc\" (UniqueName: \"kubernetes.io/projected/639fc84c-2f84-4a92-90f8-8e83c7155bb8-kube-api-access-fb2kc\") pod \"route-controller-manager-665c9c5d9f-nwjvz\" (UID: \"639fc84c-2f84-4a92-90f8-8e83c7155bb8\") " pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.625098 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/639fc84c-2f84-4a92-90f8-8e83c7155bb8-serving-cert\") pod \"route-controller-manager-665c9c5d9f-nwjvz\" (UID: \"639fc84c-2f84-4a92-90f8-8e83c7155bb8\") " pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.625141 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/639fc84c-2f84-4a92-90f8-8e83c7155bb8-config\") pod \"route-controller-manager-665c9c5d9f-nwjvz\" (UID: \"639fc84c-2f84-4a92-90f8-8e83c7155bb8\") " pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.625182 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a297f4a-7dae-4c97-b91c-f9f63b51e838-config\") pod \"controller-manager-76d5b7b487-7g56j\" (UID: \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\") " pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.625215 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" 
(UniqueName: \"kubernetes.io/configmap/4a297f4a-7dae-4c97-b91c-f9f63b51e838-proxy-ca-bundles\") pod \"controller-manager-76d5b7b487-7g56j\" (UID: \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\") " pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.726070 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnnll\" (UniqueName: \"kubernetes.io/projected/4a297f4a-7dae-4c97-b91c-f9f63b51e838-kube-api-access-gnnll\") pod \"controller-manager-76d5b7b487-7g56j\" (UID: \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\") " pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.726747 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4a297f4a-7dae-4c97-b91c-f9f63b51e838-serving-cert\") pod \"controller-manager-76d5b7b487-7g56j\" (UID: \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\") " pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.727022 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/639fc84c-2f84-4a92-90f8-8e83c7155bb8-client-ca\") pod \"route-controller-manager-665c9c5d9f-nwjvz\" (UID: \"639fc84c-2f84-4a92-90f8-8e83c7155bb8\") " pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.727317 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4a297f4a-7dae-4c97-b91c-f9f63b51e838-client-ca\") pod \"controller-manager-76d5b7b487-7g56j\" (UID: \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\") " pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.727575 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fb2kc\" (UniqueName: \"kubernetes.io/projected/639fc84c-2f84-4a92-90f8-8e83c7155bb8-kube-api-access-fb2kc\") pod \"route-controller-manager-665c9c5d9f-nwjvz\" (UID: \"639fc84c-2f84-4a92-90f8-8e83c7155bb8\") " pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.727857 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/639fc84c-2f84-4a92-90f8-8e83c7155bb8-serving-cert\") pod \"route-controller-manager-665c9c5d9f-nwjvz\" (UID: \"639fc84c-2f84-4a92-90f8-8e83c7155bb8\") " pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.728152 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/639fc84c-2f84-4a92-90f8-8e83c7155bb8-config\") pod \"route-controller-manager-665c9c5d9f-nwjvz\" (UID: \"639fc84c-2f84-4a92-90f8-8e83c7155bb8\") " pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.728426 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a297f4a-7dae-4c97-b91c-f9f63b51e838-config\") pod \"controller-manager-76d5b7b487-7g56j\" 
(UID: \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\") " pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.728672 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4a297f4a-7dae-4c97-b91c-f9f63b51e838-proxy-ca-bundles\") pod \"controller-manager-76d5b7b487-7g56j\" (UID: \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\") " pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.728459 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4a297f4a-7dae-4c97-b91c-f9f63b51e838-client-ca\") pod \"controller-manager-76d5b7b487-7g56j\" (UID: \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\") " pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.730357 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/639fc84c-2f84-4a92-90f8-8e83c7155bb8-client-ca\") pod \"route-controller-manager-665c9c5d9f-nwjvz\" (UID: \"639fc84c-2f84-4a92-90f8-8e83c7155bb8\") " pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.730370 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/639fc84c-2f84-4a92-90f8-8e83c7155bb8-config\") pod \"route-controller-manager-665c9c5d9f-nwjvz\" (UID: \"639fc84c-2f84-4a92-90f8-8e83c7155bb8\") " pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.730613 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a297f4a-7dae-4c97-b91c-f9f63b51e838-config\") pod \"controller-manager-76d5b7b487-7g56j\" (UID: \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\") " pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.732888 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4a297f4a-7dae-4c97-b91c-f9f63b51e838-proxy-ca-bundles\") pod \"controller-manager-76d5b7b487-7g56j\" (UID: \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\") " pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.734366 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4a297f4a-7dae-4c97-b91c-f9f63b51e838-serving-cert\") pod \"controller-manager-76d5b7b487-7g56j\" (UID: \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\") " pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.734696 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/639fc84c-2f84-4a92-90f8-8e83c7155bb8-serving-cert\") pod \"route-controller-manager-665c9c5d9f-nwjvz\" (UID: \"639fc84c-2f84-4a92-90f8-8e83c7155bb8\") " pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.750425 4685 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-fb2kc\" (UniqueName: \"kubernetes.io/projected/639fc84c-2f84-4a92-90f8-8e83c7155bb8-kube-api-access-fb2kc\") pod \"route-controller-manager-665c9c5d9f-nwjvz\" (UID: \"639fc84c-2f84-4a92-90f8-8e83c7155bb8\") " pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.750717 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.760567 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnnll\" (UniqueName: \"kubernetes.io/projected/4a297f4a-7dae-4c97-b91c-f9f63b51e838-kube-api-access-gnnll\") pod \"controller-manager-76d5b7b487-7g56j\" (UID: \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\") " pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:30:05 crc kubenswrapper[4685]: I0128 12:30:05.938388 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz"] Jan 28 12:30:05 crc kubenswrapper[4685]: W0128 12:30:05.944438 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod639fc84c_2f84_4a92_90f8_8e83c7155bb8.slice/crio-f518892c04ffaec8aab27d8331ed536eae413e6e3a276b228f62fa788ea981f2 WatchSource:0}: Error finding container f518892c04ffaec8aab27d8331ed536eae413e6e3a276b228f62fa788ea981f2: Status 404 returned error can't find the container with id f518892c04ffaec8aab27d8331ed536eae413e6e3a276b228f62fa788ea981f2 Jan 28 12:30:06 crc kubenswrapper[4685]: I0128 12:30:06.059908 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:30:06 crc kubenswrapper[4685]: I0128 12:30:06.159027 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" event={"ID":"639fc84c-2f84-4a92-90f8-8e83c7155bb8","Type":"ContainerStarted","Data":"f518892c04ffaec8aab27d8331ed536eae413e6e3a276b228f62fa788ea981f2"} Jan 28 12:30:06 crc kubenswrapper[4685]: I0128 12:30:06.267967 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-76d5b7b487-7g56j"] Jan 28 12:30:06 crc kubenswrapper[4685]: I0128 12:30:06.559071 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="134521a2-eac8-4019-b5f0-847968893cb8" path="/var/lib/kubelet/pods/134521a2-eac8-4019-b5f0-847968893cb8/volumes" Jan 28 12:30:07 crc kubenswrapper[4685]: I0128 12:30:07.167723 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" event={"ID":"639fc84c-2f84-4a92-90f8-8e83c7155bb8","Type":"ContainerStarted","Data":"aee91c773c68e157180f849e8e6136f6faf7945f6f44ccad7c90f4a615d0ca80"} Jan 28 12:30:07 crc kubenswrapper[4685]: I0128 12:30:07.167785 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" Jan 28 12:30:07 crc kubenswrapper[4685]: I0128 12:30:07.168872 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" event={"ID":"4a297f4a-7dae-4c97-b91c-f9f63b51e838","Type":"ContainerStarted","Data":"cbf4621a523562b9fd4dd2fd636baf91c2b1004b732d91b19ce98ed30589d2a9"} Jan 28 12:30:07 crc kubenswrapper[4685]: I0128 12:30:07.168920 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" event={"ID":"4a297f4a-7dae-4c97-b91c-f9f63b51e838","Type":"ContainerStarted","Data":"8ae733130c64995d75220d44c9eeec7fda4b84f315be3e1975e57fa590425512"} Jan 28 12:30:07 crc kubenswrapper[4685]: I0128 12:30:07.169412 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:30:07 crc kubenswrapper[4685]: I0128 12:30:07.173041 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" Jan 28 12:30:07 crc kubenswrapper[4685]: I0128 12:30:07.173114 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:30:07 crc kubenswrapper[4685]: I0128 12:30:07.182662 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" podStartSLOduration=4.182642649 podStartE2EDuration="4.182642649s" podCreationTimestamp="2026-01-28 12:30:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:30:07.182584398 +0000 UTC m=+558.269998253" watchObservedRunningTime="2026-01-28 12:30:07.182642649 +0000 UTC m=+558.270056484" Jan 28 12:30:07 crc kubenswrapper[4685]: I0128 12:30:07.242698 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" podStartSLOduration=4.242670832 podStartE2EDuration="4.242670832s" podCreationTimestamp="2026-01-28 12:30:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:30:07.23006545 +0000 UTC m=+558.317479285" watchObservedRunningTime="2026-01-28 12:30:07.242670832 +0000 UTC m=+558.330084667" Jan 28 12:30:23 crc kubenswrapper[4685]: I0128 12:30:23.507462 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gvfhc"] Jan 28 12:30:23 crc kubenswrapper[4685]: I0128 12:30:23.508145 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-gvfhc" podUID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" containerName="registry-server" containerID="cri-o://d7c9b9ef7c1464ea9eec8cfe724b3e4c3fe8493c92b6942256e10d5fe0ac9088" gracePeriod=2 Jan 28 12:30:23 crc kubenswrapper[4685]: I0128 12:30:23.703741 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ntg69"] Jan 28 12:30:23 crc kubenswrapper[4685]: I0128 12:30:23.704006 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ntg69" podUID="7d2d54a9-9f8c-4158-8551-c4351bca8c19" containerName="registry-server" containerID="cri-o://cd28bb854af2ba6d2f06e208aa621cdfc1fcea4e14765739aa48f3842cc91b69" gracePeriod=2 Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.010129 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gvfhc" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.187845 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gfjmg\" (UniqueName: \"kubernetes.io/projected/94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2-kube-api-access-gfjmg\") pod \"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2\" (UID: \"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2\") " Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.188043 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2-utilities\") pod \"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2\" (UID: \"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2\") " Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.188071 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2-catalog-content\") pod \"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2\" (UID: \"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2\") " Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.188764 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2-utilities" (OuterVolumeSpecName: "utilities") pod "94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" (UID: "94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.193705 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2-kube-api-access-gfjmg" (OuterVolumeSpecName: "kube-api-access-gfjmg") pod "94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" (UID: "94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2"). InnerVolumeSpecName "kube-api-access-gfjmg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.194015 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ntg69" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.258803 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" (UID: "94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.285940 4685 generic.go:334] "Generic (PLEG): container finished" podID="7d2d54a9-9f8c-4158-8551-c4351bca8c19" containerID="cd28bb854af2ba6d2f06e208aa621cdfc1fcea4e14765739aa48f3842cc91b69" exitCode=0 Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.286000 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntg69" event={"ID":"7d2d54a9-9f8c-4158-8551-c4351bca8c19","Type":"ContainerDied","Data":"cd28bb854af2ba6d2f06e208aa621cdfc1fcea4e14765739aa48f3842cc91b69"} Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.286014 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ntg69" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.286622 4685 scope.go:117] "RemoveContainer" containerID="cd28bb854af2ba6d2f06e208aa621cdfc1fcea4e14765739aa48f3842cc91b69" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.287391 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntg69" event={"ID":"7d2d54a9-9f8c-4158-8551-c4351bca8c19","Type":"ContainerDied","Data":"db225b671fe985f01ec3bc0725d86c5028aa2ab2b56f6012c6a2ac67b28f6c7e"} Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.289056 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.289081 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.289095 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gfjmg\" (UniqueName: \"kubernetes.io/projected/94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2-kube-api-access-gfjmg\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.289578 4685 generic.go:334] "Generic (PLEG): container finished" podID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" containerID="d7c9b9ef7c1464ea9eec8cfe724b3e4c3fe8493c92b6942256e10d5fe0ac9088" exitCode=0 Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.289618 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gvfhc" event={"ID":"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2","Type":"ContainerDied","Data":"d7c9b9ef7c1464ea9eec8cfe724b3e4c3fe8493c92b6942256e10d5fe0ac9088"} Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.289644 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gvfhc" event={"ID":"94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2","Type":"ContainerDied","Data":"7fda746dcc79e22591455ebfc8032651b028b8c47fcaca3e47f5208e0a044310"} Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.289908 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-gvfhc" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.308722 4685 scope.go:117] "RemoveContainer" containerID="9e8ca1225350fc772412a78c7bd5e9caaef6072b38416c764592d75cf13e940d" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.324080 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gvfhc"] Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.328776 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-gvfhc"] Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.347344 4685 scope.go:117] "RemoveContainer" containerID="17b0fab2348cedde9986db84f51154c822881352f42ae9400db8293a35813a99" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.362096 4685 scope.go:117] "RemoveContainer" containerID="cd28bb854af2ba6d2f06e208aa621cdfc1fcea4e14765739aa48f3842cc91b69" Jan 28 12:30:24 crc kubenswrapper[4685]: E0128 12:30:24.362730 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd28bb854af2ba6d2f06e208aa621cdfc1fcea4e14765739aa48f3842cc91b69\": container with ID starting with cd28bb854af2ba6d2f06e208aa621cdfc1fcea4e14765739aa48f3842cc91b69 not found: ID does not exist" containerID="cd28bb854af2ba6d2f06e208aa621cdfc1fcea4e14765739aa48f3842cc91b69" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.362772 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd28bb854af2ba6d2f06e208aa621cdfc1fcea4e14765739aa48f3842cc91b69"} err="failed to get container status \"cd28bb854af2ba6d2f06e208aa621cdfc1fcea4e14765739aa48f3842cc91b69\": rpc error: code = NotFound desc = could not find container \"cd28bb854af2ba6d2f06e208aa621cdfc1fcea4e14765739aa48f3842cc91b69\": container with ID starting with cd28bb854af2ba6d2f06e208aa621cdfc1fcea4e14765739aa48f3842cc91b69 not found: ID does not exist" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.362797 4685 scope.go:117] "RemoveContainer" containerID="9e8ca1225350fc772412a78c7bd5e9caaef6072b38416c764592d75cf13e940d" Jan 28 12:30:24 crc kubenswrapper[4685]: E0128 12:30:24.363259 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e8ca1225350fc772412a78c7bd5e9caaef6072b38416c764592d75cf13e940d\": container with ID starting with 9e8ca1225350fc772412a78c7bd5e9caaef6072b38416c764592d75cf13e940d not found: ID does not exist" containerID="9e8ca1225350fc772412a78c7bd5e9caaef6072b38416c764592d75cf13e940d" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.363289 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e8ca1225350fc772412a78c7bd5e9caaef6072b38416c764592d75cf13e940d"} err="failed to get container status \"9e8ca1225350fc772412a78c7bd5e9caaef6072b38416c764592d75cf13e940d\": rpc error: code = NotFound desc = could not find container \"9e8ca1225350fc772412a78c7bd5e9caaef6072b38416c764592d75cf13e940d\": container with ID starting with 9e8ca1225350fc772412a78c7bd5e9caaef6072b38416c764592d75cf13e940d not found: ID does not exist" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.363307 4685 scope.go:117] "RemoveContainer" containerID="17b0fab2348cedde9986db84f51154c822881352f42ae9400db8293a35813a99" Jan 28 12:30:24 crc kubenswrapper[4685]: E0128 12:30:24.363593 4685 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"17b0fab2348cedde9986db84f51154c822881352f42ae9400db8293a35813a99\": container with ID starting with 17b0fab2348cedde9986db84f51154c822881352f42ae9400db8293a35813a99 not found: ID does not exist" containerID="17b0fab2348cedde9986db84f51154c822881352f42ae9400db8293a35813a99" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.363618 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17b0fab2348cedde9986db84f51154c822881352f42ae9400db8293a35813a99"} err="failed to get container status \"17b0fab2348cedde9986db84f51154c822881352f42ae9400db8293a35813a99\": rpc error: code = NotFound desc = could not find container \"17b0fab2348cedde9986db84f51154c822881352f42ae9400db8293a35813a99\": container with ID starting with 17b0fab2348cedde9986db84f51154c822881352f42ae9400db8293a35813a99 not found: ID does not exist" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.363634 4685 scope.go:117] "RemoveContainer" containerID="d7c9b9ef7c1464ea9eec8cfe724b3e4c3fe8493c92b6942256e10d5fe0ac9088" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.376614 4685 scope.go:117] "RemoveContainer" containerID="e55e9bc8280a70b64ffa630f40f46ef5323b4df51af88b6ac6ad7d0330193c82" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.390417 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d2d54a9-9f8c-4158-8551-c4351bca8c19-catalog-content\") pod \"7d2d54a9-9f8c-4158-8551-c4351bca8c19\" (UID: \"7d2d54a9-9f8c-4158-8551-c4351bca8c19\") " Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.390537 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d2d54a9-9f8c-4158-8551-c4351bca8c19-utilities\") pod \"7d2d54a9-9f8c-4158-8551-c4351bca8c19\" (UID: \"7d2d54a9-9f8c-4158-8551-c4351bca8c19\") " Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.390563 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-glfkg\" (UniqueName: \"kubernetes.io/projected/7d2d54a9-9f8c-4158-8551-c4351bca8c19-kube-api-access-glfkg\") pod \"7d2d54a9-9f8c-4158-8551-c4351bca8c19\" (UID: \"7d2d54a9-9f8c-4158-8551-c4351bca8c19\") " Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.391471 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d2d54a9-9f8c-4158-8551-c4351bca8c19-utilities" (OuterVolumeSpecName: "utilities") pod "7d2d54a9-9f8c-4158-8551-c4351bca8c19" (UID: "7d2d54a9-9f8c-4158-8551-c4351bca8c19"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.391692 4685 scope.go:117] "RemoveContainer" containerID="ddf9800b30801c563636b6a0a99b41cb984630b254c09cc2863cbee51ce0921e" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.393759 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d2d54a9-9f8c-4158-8551-c4351bca8c19-kube-api-access-glfkg" (OuterVolumeSpecName: "kube-api-access-glfkg") pod "7d2d54a9-9f8c-4158-8551-c4351bca8c19" (UID: "7d2d54a9-9f8c-4158-8551-c4351bca8c19"). InnerVolumeSpecName "kube-api-access-glfkg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.415847 4685 scope.go:117] "RemoveContainer" containerID="d7c9b9ef7c1464ea9eec8cfe724b3e4c3fe8493c92b6942256e10d5fe0ac9088" Jan 28 12:30:24 crc kubenswrapper[4685]: E0128 12:30:24.416605 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7c9b9ef7c1464ea9eec8cfe724b3e4c3fe8493c92b6942256e10d5fe0ac9088\": container with ID starting with d7c9b9ef7c1464ea9eec8cfe724b3e4c3fe8493c92b6942256e10d5fe0ac9088 not found: ID does not exist" containerID="d7c9b9ef7c1464ea9eec8cfe724b3e4c3fe8493c92b6942256e10d5fe0ac9088" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.416667 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7c9b9ef7c1464ea9eec8cfe724b3e4c3fe8493c92b6942256e10d5fe0ac9088"} err="failed to get container status \"d7c9b9ef7c1464ea9eec8cfe724b3e4c3fe8493c92b6942256e10d5fe0ac9088\": rpc error: code = NotFound desc = could not find container \"d7c9b9ef7c1464ea9eec8cfe724b3e4c3fe8493c92b6942256e10d5fe0ac9088\": container with ID starting with d7c9b9ef7c1464ea9eec8cfe724b3e4c3fe8493c92b6942256e10d5fe0ac9088 not found: ID does not exist" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.416714 4685 scope.go:117] "RemoveContainer" containerID="e55e9bc8280a70b64ffa630f40f46ef5323b4df51af88b6ac6ad7d0330193c82" Jan 28 12:30:24 crc kubenswrapper[4685]: E0128 12:30:24.417293 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e55e9bc8280a70b64ffa630f40f46ef5323b4df51af88b6ac6ad7d0330193c82\": container with ID starting with e55e9bc8280a70b64ffa630f40f46ef5323b4df51af88b6ac6ad7d0330193c82 not found: ID does not exist" containerID="e55e9bc8280a70b64ffa630f40f46ef5323b4df51af88b6ac6ad7d0330193c82" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.417326 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e55e9bc8280a70b64ffa630f40f46ef5323b4df51af88b6ac6ad7d0330193c82"} err="failed to get container status \"e55e9bc8280a70b64ffa630f40f46ef5323b4df51af88b6ac6ad7d0330193c82\": rpc error: code = NotFound desc = could not find container \"e55e9bc8280a70b64ffa630f40f46ef5323b4df51af88b6ac6ad7d0330193c82\": container with ID starting with e55e9bc8280a70b64ffa630f40f46ef5323b4df51af88b6ac6ad7d0330193c82 not found: ID does not exist" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.417351 4685 scope.go:117] "RemoveContainer" containerID="ddf9800b30801c563636b6a0a99b41cb984630b254c09cc2863cbee51ce0921e" Jan 28 12:30:24 crc kubenswrapper[4685]: E0128 12:30:24.417881 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ddf9800b30801c563636b6a0a99b41cb984630b254c09cc2863cbee51ce0921e\": container with ID starting with ddf9800b30801c563636b6a0a99b41cb984630b254c09cc2863cbee51ce0921e not found: ID does not exist" containerID="ddf9800b30801c563636b6a0a99b41cb984630b254c09cc2863cbee51ce0921e" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.417965 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddf9800b30801c563636b6a0a99b41cb984630b254c09cc2863cbee51ce0921e"} err="failed to get container status \"ddf9800b30801c563636b6a0a99b41cb984630b254c09cc2863cbee51ce0921e\": rpc error: code = NotFound desc = could not 
find container \"ddf9800b30801c563636b6a0a99b41cb984630b254c09cc2863cbee51ce0921e\": container with ID starting with ddf9800b30801c563636b6a0a99b41cb984630b254c09cc2863cbee51ce0921e not found: ID does not exist" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.449298 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d2d54a9-9f8c-4158-8551-c4351bca8c19-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7d2d54a9-9f8c-4158-8551-c4351bca8c19" (UID: "7d2d54a9-9f8c-4158-8551-c4351bca8c19"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.491877 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d2d54a9-9f8c-4158-8551-c4351bca8c19-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.491927 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d2d54a9-9f8c-4158-8551-c4351bca8c19-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.491941 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-glfkg\" (UniqueName: \"kubernetes.io/projected/7d2d54a9-9f8c-4158-8551-c4351bca8c19-kube-api-access-glfkg\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.556041 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" path="/var/lib/kubelet/pods/94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2/volumes" Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.610882 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ntg69"] Jan 28 12:30:24 crc kubenswrapper[4685]: I0128 12:30:24.615550 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ntg69"] Jan 28 12:30:25 crc kubenswrapper[4685]: I0128 12:30:25.910270 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-smscz"] Jan 28 12:30:25 crc kubenswrapper[4685]: I0128 12:30:25.911925 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-smscz" podUID="e4bd8edd-83cd-4485-b7ad-b13d5aa53a01" containerName="registry-server" containerID="cri-o://09825a1e10594adcce28925c5d6d2537a1fd308ff1f40338842a902999563e97" gracePeriod=2 Jan 28 12:30:26 crc kubenswrapper[4685]: I0128 12:30:26.112201 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5zhxv"] Jan 28 12:30:26 crc kubenswrapper[4685]: I0128 12:30:26.112574 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5zhxv" podUID="6d558882-5a65-41ae-bcf0-d13c7cecc034" containerName="registry-server" containerID="cri-o://809e6fae14ecfb2c50c23fe8025cf3d553761ff959dceffddf9bd8fef370f303" gracePeriod=2 Jan 28 12:30:26 crc kubenswrapper[4685]: I0128 12:30:26.308752 4685 generic.go:334] "Generic (PLEG): container finished" podID="6d558882-5a65-41ae-bcf0-d13c7cecc034" containerID="809e6fae14ecfb2c50c23fe8025cf3d553761ff959dceffddf9bd8fef370f303" exitCode=0 Jan 28 12:30:26 crc kubenswrapper[4685]: I0128 12:30:26.308833 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-5zhxv" event={"ID":"6d558882-5a65-41ae-bcf0-d13c7cecc034","Type":"ContainerDied","Data":"809e6fae14ecfb2c50c23fe8025cf3d553761ff959dceffddf9bd8fef370f303"} Jan 28 12:30:26 crc kubenswrapper[4685]: I0128 12:30:26.311788 4685 generic.go:334] "Generic (PLEG): container finished" podID="e4bd8edd-83cd-4485-b7ad-b13d5aa53a01" containerID="09825a1e10594adcce28925c5d6d2537a1fd308ff1f40338842a902999563e97" exitCode=0 Jan 28 12:30:26 crc kubenswrapper[4685]: I0128 12:30:26.311829 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-smscz" event={"ID":"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01","Type":"ContainerDied","Data":"09825a1e10594adcce28925c5d6d2537a1fd308ff1f40338842a902999563e97"} Jan 28 12:30:26 crc kubenswrapper[4685]: I0128 12:30:26.554840 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d2d54a9-9f8c-4158-8551-c4351bca8c19" path="/var/lib/kubelet/pods/7d2d54a9-9f8c-4158-8551-c4351bca8c19/volumes" Jan 28 12:30:26 crc kubenswrapper[4685]: I0128 12:30:26.701336 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5zhxv" Jan 28 12:30:26 crc kubenswrapper[4685]: I0128 12:30:26.823929 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d558882-5a65-41ae-bcf0-d13c7cecc034-catalog-content\") pod \"6d558882-5a65-41ae-bcf0-d13c7cecc034\" (UID: \"6d558882-5a65-41ae-bcf0-d13c7cecc034\") " Jan 28 12:30:26 crc kubenswrapper[4685]: I0128 12:30:26.824045 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhtx5\" (UniqueName: \"kubernetes.io/projected/6d558882-5a65-41ae-bcf0-d13c7cecc034-kube-api-access-jhtx5\") pod \"6d558882-5a65-41ae-bcf0-d13c7cecc034\" (UID: \"6d558882-5a65-41ae-bcf0-d13c7cecc034\") " Jan 28 12:30:26 crc kubenswrapper[4685]: I0128 12:30:26.824075 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d558882-5a65-41ae-bcf0-d13c7cecc034-utilities\") pod \"6d558882-5a65-41ae-bcf0-d13c7cecc034\" (UID: \"6d558882-5a65-41ae-bcf0-d13c7cecc034\") " Jan 28 12:30:26 crc kubenswrapper[4685]: I0128 12:30:26.826073 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d558882-5a65-41ae-bcf0-d13c7cecc034-utilities" (OuterVolumeSpecName: "utilities") pod "6d558882-5a65-41ae-bcf0-d13c7cecc034" (UID: "6d558882-5a65-41ae-bcf0-d13c7cecc034"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:30:26 crc kubenswrapper[4685]: I0128 12:30:26.833508 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d558882-5a65-41ae-bcf0-d13c7cecc034-kube-api-access-jhtx5" (OuterVolumeSpecName: "kube-api-access-jhtx5") pod "6d558882-5a65-41ae-bcf0-d13c7cecc034" (UID: "6d558882-5a65-41ae-bcf0-d13c7cecc034"). InnerVolumeSpecName "kube-api-access-jhtx5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:30:26 crc kubenswrapper[4685]: I0128 12:30:26.916796 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-smscz" Jan 28 12:30:26 crc kubenswrapper[4685]: I0128 12:30:26.925422 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhtx5\" (UniqueName: \"kubernetes.io/projected/6d558882-5a65-41ae-bcf0-d13c7cecc034-kube-api-access-jhtx5\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:26 crc kubenswrapper[4685]: I0128 12:30:26.925446 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d558882-5a65-41ae-bcf0-d13c7cecc034-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:26 crc kubenswrapper[4685]: I0128 12:30:26.943933 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d558882-5a65-41ae-bcf0-d13c7cecc034-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6d558882-5a65-41ae-bcf0-d13c7cecc034" (UID: "6d558882-5a65-41ae-bcf0-d13c7cecc034"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.028724 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4bd8edd-83cd-4485-b7ad-b13d5aa53a01-catalog-content\") pod \"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01\" (UID: \"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01\") " Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.028831 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zq6fj\" (UniqueName: \"kubernetes.io/projected/e4bd8edd-83cd-4485-b7ad-b13d5aa53a01-kube-api-access-zq6fj\") pod \"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01\" (UID: \"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01\") " Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.028877 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4bd8edd-83cd-4485-b7ad-b13d5aa53a01-utilities\") pod \"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01\" (UID: \"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01\") " Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.029224 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d558882-5a65-41ae-bcf0-d13c7cecc034-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.030869 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e4bd8edd-83cd-4485-b7ad-b13d5aa53a01-utilities" (OuterVolumeSpecName: "utilities") pod "e4bd8edd-83cd-4485-b7ad-b13d5aa53a01" (UID: "e4bd8edd-83cd-4485-b7ad-b13d5aa53a01"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.033778 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4bd8edd-83cd-4485-b7ad-b13d5aa53a01-kube-api-access-zq6fj" (OuterVolumeSpecName: "kube-api-access-zq6fj") pod "e4bd8edd-83cd-4485-b7ad-b13d5aa53a01" (UID: "e4bd8edd-83cd-4485-b7ad-b13d5aa53a01"). InnerVolumeSpecName "kube-api-access-zq6fj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.059229 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e4bd8edd-83cd-4485-b7ad-b13d5aa53a01-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e4bd8edd-83cd-4485-b7ad-b13d5aa53a01" (UID: "e4bd8edd-83cd-4485-b7ad-b13d5aa53a01"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.130274 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4bd8edd-83cd-4485-b7ad-b13d5aa53a01-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.130340 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zq6fj\" (UniqueName: \"kubernetes.io/projected/e4bd8edd-83cd-4485-b7ad-b13d5aa53a01-kube-api-access-zq6fj\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.130368 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4bd8edd-83cd-4485-b7ad-b13d5aa53a01-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.322183 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5zhxv" event={"ID":"6d558882-5a65-41ae-bcf0-d13c7cecc034","Type":"ContainerDied","Data":"97fb83a0024b300d10e030c6bd7d8a962ca8f6edcda6becc26ca1cfa513066fb"} Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.322507 4685 scope.go:117] "RemoveContainer" containerID="809e6fae14ecfb2c50c23fe8025cf3d553761ff959dceffddf9bd8fef370f303" Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.322239 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5zhxv" Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.325266 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-smscz" event={"ID":"e4bd8edd-83cd-4485-b7ad-b13d5aa53a01","Type":"ContainerDied","Data":"24a2eb8eed1cf1eddb2a5326a24d4479c5464665a24cb09cf22f47358b7f2f75"} Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.325339 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-smscz" Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.344324 4685 scope.go:117] "RemoveContainer" containerID="1b8e163aa4c59e5461b2978211e65b493bf880a2b306864c46a1b56b35f4a790" Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.364655 4685 scope.go:117] "RemoveContainer" containerID="534fdaede1d8e595c2d352c6ef262c9ec54acb2f9a0fa46e80b8d010e711e97a" Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.403135 4685 scope.go:117] "RemoveContainer" containerID="09825a1e10594adcce28925c5d6d2537a1fd308ff1f40338842a902999563e97" Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.412687 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-smscz"] Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.418878 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-smscz"] Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.427469 4685 scope.go:117] "RemoveContainer" containerID="52ccd81667dc935e9130342d889c379b844e664866d2951188f9a67b2ee72ff3" Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.431561 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5zhxv"] Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.435543 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5zhxv"] Jan 28 12:30:27 crc kubenswrapper[4685]: I0128 12:30:27.445463 4685 scope.go:117] "RemoveContainer" containerID="5c353ad8e7249cc71fa179838d756701d0cbe36903c47ceb51d14a85144186a1" Jan 28 12:30:28 crc kubenswrapper[4685]: I0128 12:30:28.552049 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d558882-5a65-41ae-bcf0-d13c7cecc034" path="/var/lib/kubelet/pods/6d558882-5a65-41ae-bcf0-d13c7cecc034/volumes" Jan 28 12:30:28 crc kubenswrapper[4685]: I0128 12:30:28.552829 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4bd8edd-83cd-4485-b7ad-b13d5aa53a01" path="/var/lib/kubelet/pods/e4bd8edd-83cd-4485-b7ad-b13d5aa53a01/volumes" Jan 28 12:30:42 crc kubenswrapper[4685]: I0128 12:30:42.486045 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4gdsr"] Jan 28 12:30:57 crc kubenswrapper[4685]: I0128 12:30:57.069757 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:30:57 crc kubenswrapper[4685]: I0128 12:30:57.070708 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:31:03 crc kubenswrapper[4685]: I0128 12:31:03.125657 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-76d5b7b487-7g56j"] Jan 28 12:31:03 crc kubenswrapper[4685]: I0128 12:31:03.126338 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" podUID="4a297f4a-7dae-4c97-b91c-f9f63b51e838" 
containerName="controller-manager" containerID="cri-o://cbf4621a523562b9fd4dd2fd636baf91c2b1004b732d91b19ce98ed30589d2a9" gracePeriod=30 Jan 28 12:31:03 crc kubenswrapper[4685]: I0128 12:31:03.145728 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz"] Jan 28 12:31:03 crc kubenswrapper[4685]: I0128 12:31:03.146462 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" podUID="639fc84c-2f84-4a92-90f8-8e83c7155bb8" containerName="route-controller-manager" containerID="cri-o://aee91c773c68e157180f849e8e6136f6faf7945f6f44ccad7c90f4a615d0ca80" gracePeriod=30 Jan 28 12:31:03 crc kubenswrapper[4685]: I0128 12:31:03.562143 4685 generic.go:334] "Generic (PLEG): container finished" podID="639fc84c-2f84-4a92-90f8-8e83c7155bb8" containerID="aee91c773c68e157180f849e8e6136f6faf7945f6f44ccad7c90f4a615d0ca80" exitCode=0 Jan 28 12:31:03 crc kubenswrapper[4685]: I0128 12:31:03.562286 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" event={"ID":"639fc84c-2f84-4a92-90f8-8e83c7155bb8","Type":"ContainerDied","Data":"aee91c773c68e157180f849e8e6136f6faf7945f6f44ccad7c90f4a615d0ca80"} Jan 28 12:31:03 crc kubenswrapper[4685]: I0128 12:31:03.564092 4685 generic.go:334] "Generic (PLEG): container finished" podID="4a297f4a-7dae-4c97-b91c-f9f63b51e838" containerID="cbf4621a523562b9fd4dd2fd636baf91c2b1004b732d91b19ce98ed30589d2a9" exitCode=0 Jan 28 12:31:03 crc kubenswrapper[4685]: I0128 12:31:03.564123 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" event={"ID":"4a297f4a-7dae-4c97-b91c-f9f63b51e838","Type":"ContainerDied","Data":"cbf4621a523562b9fd4dd2fd636baf91c2b1004b732d91b19ce98ed30589d2a9"} Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.094067 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.098550 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.130583 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fb2kc\" (UniqueName: \"kubernetes.io/projected/639fc84c-2f84-4a92-90f8-8e83c7155bb8-kube-api-access-fb2kc\") pod \"639fc84c-2f84-4a92-90f8-8e83c7155bb8\" (UID: \"639fc84c-2f84-4a92-90f8-8e83c7155bb8\") " Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.130649 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/639fc84c-2f84-4a92-90f8-8e83c7155bb8-client-ca\") pod \"639fc84c-2f84-4a92-90f8-8e83c7155bb8\" (UID: \"639fc84c-2f84-4a92-90f8-8e83c7155bb8\") " Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.130744 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4a297f4a-7dae-4c97-b91c-f9f63b51e838-client-ca\") pod \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\" (UID: \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\") " Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.130805 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4a297f4a-7dae-4c97-b91c-f9f63b51e838-proxy-ca-bundles\") pod \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\" (UID: \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\") " Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.130828 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/639fc84c-2f84-4a92-90f8-8e83c7155bb8-config\") pod \"639fc84c-2f84-4a92-90f8-8e83c7155bb8\" (UID: \"639fc84c-2f84-4a92-90f8-8e83c7155bb8\") " Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.130886 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/639fc84c-2f84-4a92-90f8-8e83c7155bb8-serving-cert\") pod \"639fc84c-2f84-4a92-90f8-8e83c7155bb8\" (UID: \"639fc84c-2f84-4a92-90f8-8e83c7155bb8\") " Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.130928 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a297f4a-7dae-4c97-b91c-f9f63b51e838-config\") pod \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\" (UID: \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\") " Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.130950 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4a297f4a-7dae-4c97-b91c-f9f63b51e838-serving-cert\") pod \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\" (UID: \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\") " Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.130985 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnnll\" (UniqueName: \"kubernetes.io/projected/4a297f4a-7dae-4c97-b91c-f9f63b51e838-kube-api-access-gnnll\") pod \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\" (UID: \"4a297f4a-7dae-4c97-b91c-f9f63b51e838\") " Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.131716 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/639fc84c-2f84-4a92-90f8-8e83c7155bb8-client-ca" (OuterVolumeSpecName: "client-ca") pod "639fc84c-2f84-4a92-90f8-8e83c7155bb8" 
(UID: "639fc84c-2f84-4a92-90f8-8e83c7155bb8"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.132248 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a297f4a-7dae-4c97-b91c-f9f63b51e838-client-ca" (OuterVolumeSpecName: "client-ca") pod "4a297f4a-7dae-4c97-b91c-f9f63b51e838" (UID: "4a297f4a-7dae-4c97-b91c-f9f63b51e838"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.132263 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a297f4a-7dae-4c97-b91c-f9f63b51e838-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "4a297f4a-7dae-4c97-b91c-f9f63b51e838" (UID: "4a297f4a-7dae-4c97-b91c-f9f63b51e838"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.132389 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/639fc84c-2f84-4a92-90f8-8e83c7155bb8-config" (OuterVolumeSpecName: "config") pod "639fc84c-2f84-4a92-90f8-8e83c7155bb8" (UID: "639fc84c-2f84-4a92-90f8-8e83c7155bb8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.133703 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a297f4a-7dae-4c97-b91c-f9f63b51e838-config" (OuterVolumeSpecName: "config") pod "4a297f4a-7dae-4c97-b91c-f9f63b51e838" (UID: "4a297f4a-7dae-4c97-b91c-f9f63b51e838"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.139079 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/639fc84c-2f84-4a92-90f8-8e83c7155bb8-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "639fc84c-2f84-4a92-90f8-8e83c7155bb8" (UID: "639fc84c-2f84-4a92-90f8-8e83c7155bb8"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.139403 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/639fc84c-2f84-4a92-90f8-8e83c7155bb8-kube-api-access-fb2kc" (OuterVolumeSpecName: "kube-api-access-fb2kc") pod "639fc84c-2f84-4a92-90f8-8e83c7155bb8" (UID: "639fc84c-2f84-4a92-90f8-8e83c7155bb8"). InnerVolumeSpecName "kube-api-access-fb2kc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.139474 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a297f4a-7dae-4c97-b91c-f9f63b51e838-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "4a297f4a-7dae-4c97-b91c-f9f63b51e838" (UID: "4a297f4a-7dae-4c97-b91c-f9f63b51e838"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.142819 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a297f4a-7dae-4c97-b91c-f9f63b51e838-kube-api-access-gnnll" (OuterVolumeSpecName: "kube-api-access-gnnll") pod "4a297f4a-7dae-4c97-b91c-f9f63b51e838" (UID: "4a297f4a-7dae-4c97-b91c-f9f63b51e838"). 
InnerVolumeSpecName "kube-api-access-gnnll". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.232064 4685 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/639fc84c-2f84-4a92-90f8-8e83c7155bb8-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.232128 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a297f4a-7dae-4c97-b91c-f9f63b51e838-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.232144 4685 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4a297f4a-7dae-4c97-b91c-f9f63b51e838-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.232162 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnnll\" (UniqueName: \"kubernetes.io/projected/4a297f4a-7dae-4c97-b91c-f9f63b51e838-kube-api-access-gnnll\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.232206 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fb2kc\" (UniqueName: \"kubernetes.io/projected/639fc84c-2f84-4a92-90f8-8e83c7155bb8-kube-api-access-fb2kc\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.232221 4685 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/639fc84c-2f84-4a92-90f8-8e83c7155bb8-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.232236 4685 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4a297f4a-7dae-4c97-b91c-f9f63b51e838-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.232251 4685 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4a297f4a-7dae-4c97-b91c-f9f63b51e838-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.232266 4685 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/639fc84c-2f84-4a92-90f8-8e83c7155bb8-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.470772 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5"] Jan 28 12:31:04 crc kubenswrapper[4685]: E0128 12:31:04.470981 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" containerName="extract-content" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.470999 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" containerName="extract-content" Jan 28 12:31:04 crc kubenswrapper[4685]: E0128 12:31:04.471012 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4bd8edd-83cd-4485-b7ad-b13d5aa53a01" containerName="extract-content" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.471020 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4bd8edd-83cd-4485-b7ad-b13d5aa53a01" containerName="extract-content" Jan 28 12:31:04 crc kubenswrapper[4685]: E0128 12:31:04.471477 4685 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d2d54a9-9f8c-4158-8551-c4351bca8c19" containerName="registry-server" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.471492 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d2d54a9-9f8c-4158-8551-c4351bca8c19" containerName="registry-server" Jan 28 12:31:04 crc kubenswrapper[4685]: E0128 12:31:04.471504 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d2d54a9-9f8c-4158-8551-c4351bca8c19" containerName="extract-utilities" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.471511 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d2d54a9-9f8c-4158-8551-c4351bca8c19" containerName="extract-utilities" Jan 28 12:31:04 crc kubenswrapper[4685]: E0128 12:31:04.471524 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="639fc84c-2f84-4a92-90f8-8e83c7155bb8" containerName="route-controller-manager" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.471531 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="639fc84c-2f84-4a92-90f8-8e83c7155bb8" containerName="route-controller-manager" Jan 28 12:31:04 crc kubenswrapper[4685]: E0128 12:31:04.471541 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4bd8edd-83cd-4485-b7ad-b13d5aa53a01" containerName="extract-utilities" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.471549 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4bd8edd-83cd-4485-b7ad-b13d5aa53a01" containerName="extract-utilities" Jan 28 12:31:04 crc kubenswrapper[4685]: E0128 12:31:04.471559 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d558882-5a65-41ae-bcf0-d13c7cecc034" containerName="extract-content" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.471566 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d558882-5a65-41ae-bcf0-d13c7cecc034" containerName="extract-content" Jan 28 12:31:04 crc kubenswrapper[4685]: E0128 12:31:04.471575 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d558882-5a65-41ae-bcf0-d13c7cecc034" containerName="registry-server" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.471584 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d558882-5a65-41ae-bcf0-d13c7cecc034" containerName="registry-server" Jan 28 12:31:04 crc kubenswrapper[4685]: E0128 12:31:04.471590 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" containerName="registry-server" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.471597 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" containerName="registry-server" Jan 28 12:31:04 crc kubenswrapper[4685]: E0128 12:31:04.471645 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d2d54a9-9f8c-4158-8551-c4351bca8c19" containerName="extract-content" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.471653 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d2d54a9-9f8c-4158-8551-c4351bca8c19" containerName="extract-content" Jan 28 12:31:04 crc kubenswrapper[4685]: E0128 12:31:04.471665 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" containerName="extract-utilities" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.471674 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" containerName="extract-utilities" Jan 28 12:31:04 crc 
kubenswrapper[4685]: E0128 12:31:04.471689 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a297f4a-7dae-4c97-b91c-f9f63b51e838" containerName="controller-manager" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.471722 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a297f4a-7dae-4c97-b91c-f9f63b51e838" containerName="controller-manager" Jan 28 12:31:04 crc kubenswrapper[4685]: E0128 12:31:04.471735 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d558882-5a65-41ae-bcf0-d13c7cecc034" containerName="extract-utilities" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.471742 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d558882-5a65-41ae-bcf0-d13c7cecc034" containerName="extract-utilities" Jan 28 12:31:04 crc kubenswrapper[4685]: E0128 12:31:04.471754 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4bd8edd-83cd-4485-b7ad-b13d5aa53a01" containerName="registry-server" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.471761 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4bd8edd-83cd-4485-b7ad-b13d5aa53a01" containerName="registry-server" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.472126 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4bd8edd-83cd-4485-b7ad-b13d5aa53a01" containerName="registry-server" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.472143 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d558882-5a65-41ae-bcf0-d13c7cecc034" containerName="registry-server" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.472153 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a297f4a-7dae-4c97-b91c-f9f63b51e838" containerName="controller-manager" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.472207 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="94eebdc5-a6e5-4f0a-973a-2ad42a77e0d2" containerName="registry-server" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.472219 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d2d54a9-9f8c-4158-8551-c4351bca8c19" containerName="registry-server" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.472231 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="639fc84c-2f84-4a92-90f8-8e83c7155bb8" containerName="route-controller-manager" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.472811 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp"] Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.474151 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.474247 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.478727 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5"] Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.482671 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp"] Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.535951 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b398e323-335f-48c9-b72b-eed794f32525-client-ca\") pod \"controller-manager-d6b68c7f4-qkgzp\" (UID: \"b398e323-335f-48c9-b72b-eed794f32525\") " pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.536088 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ws46q\" (UniqueName: \"kubernetes.io/projected/f9f4ab43-f730-4dd1-b305-19a256a75286-kube-api-access-ws46q\") pod \"route-controller-manager-ff64d785c-4fbj5\" (UID: \"f9f4ab43-f730-4dd1-b305-19a256a75286\") " pod="openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.536154 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b398e323-335f-48c9-b72b-eed794f32525-config\") pod \"controller-manager-d6b68c7f4-qkgzp\" (UID: \"b398e323-335f-48c9-b72b-eed794f32525\") " pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.536227 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b398e323-335f-48c9-b72b-eed794f32525-proxy-ca-bundles\") pod \"controller-manager-d6b68c7f4-qkgzp\" (UID: \"b398e323-335f-48c9-b72b-eed794f32525\") " pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.536275 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f9f4ab43-f730-4dd1-b305-19a256a75286-client-ca\") pod \"route-controller-manager-ff64d785c-4fbj5\" (UID: \"f9f4ab43-f730-4dd1-b305-19a256a75286\") " pod="openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.536319 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f9f4ab43-f730-4dd1-b305-19a256a75286-serving-cert\") pod \"route-controller-manager-ff64d785c-4fbj5\" (UID: \"f9f4ab43-f730-4dd1-b305-19a256a75286\") " pod="openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.536529 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9f4ab43-f730-4dd1-b305-19a256a75286-config\") pod \"route-controller-manager-ff64d785c-4fbj5\" (UID: \"f9f4ab43-f730-4dd1-b305-19a256a75286\") " 
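
The RemoveStaleState burst just above is admission-time housekeeping: when SyncLoop ADD admits the replacement pods, the CPU manager, its state store (state_mem), and the memory manager drop any per-container resource assignments still recorded for pods that no longer exist, one "removing container" / "Deleted CPUSet assignment" pair per stale container, before the VerifyControllerAttachedVolume phase begins for the new pods. A greatly simplified sketch of that pattern, with illustrative names rather than the kubelet's real types:

    // A greatly simplified sketch of the stale-state cleanup pattern; the
    // names here are illustrative, not the kubelet's real types.
    package cpustate

    type containerKey struct{ podUID, containerName string }

    // Cleaner holds per-container CPU assignments, loosely mirroring state_mem.
    type Cleaner struct {
        assignments map[containerKey]string // containerKey -> CPU set, e.g. "2-3"
    }

    // RemoveStaleState drops assignments for pods the kubelet no longer knows,
    // mirroring the "removing container" / "Deleted CPUSet assignment" pairs.
    func (c *Cleaner) RemoveStaleState(activePods map[string]bool) {
        for key := range c.assignments {
            if !activePods[key.podUID] {
                delete(c.assignments, key) // safe during range in Go
            }
        }
    }
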
pod="openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.536604 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbwdt\" (UniqueName: \"kubernetes.io/projected/b398e323-335f-48c9-b72b-eed794f32525-kube-api-access-cbwdt\") pod \"controller-manager-d6b68c7f4-qkgzp\" (UID: \"b398e323-335f-48c9-b72b-eed794f32525\") " pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.536665 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b398e323-335f-48c9-b72b-eed794f32525-serving-cert\") pod \"controller-manager-d6b68c7f4-qkgzp\" (UID: \"b398e323-335f-48c9-b72b-eed794f32525\") " pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.572278 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.572268 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-76d5b7b487-7g56j" event={"ID":"4a297f4a-7dae-4c97-b91c-f9f63b51e838","Type":"ContainerDied","Data":"8ae733130c64995d75220d44c9eeec7fda4b84f315be3e1975e57fa590425512"} Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.572436 4685 scope.go:117] "RemoveContainer" containerID="cbf4621a523562b9fd4dd2fd636baf91c2b1004b732d91b19ce98ed30589d2a9" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.574557 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" event={"ID":"639fc84c-2f84-4a92-90f8-8e83c7155bb8","Type":"ContainerDied","Data":"f518892c04ffaec8aab27d8331ed536eae413e6e3a276b228f62fa788ea981f2"} Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.574635 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.595364 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz"] Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.603506 4685 scope.go:117] "RemoveContainer" containerID="aee91c773c68e157180f849e8e6136f6faf7945f6f44ccad7c90f4a615d0ca80" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.604844 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-665c9c5d9f-nwjvz"] Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.608493 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-76d5b7b487-7g56j"] Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.611391 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-76d5b7b487-7g56j"] Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.640203 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b398e323-335f-48c9-b72b-eed794f32525-serving-cert\") pod \"controller-manager-d6b68c7f4-qkgzp\" (UID: \"b398e323-335f-48c9-b72b-eed794f32525\") " pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.640257 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b398e323-335f-48c9-b72b-eed794f32525-client-ca\") pod \"controller-manager-d6b68c7f4-qkgzp\" (UID: \"b398e323-335f-48c9-b72b-eed794f32525\") " pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.640292 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ws46q\" (UniqueName: \"kubernetes.io/projected/f9f4ab43-f730-4dd1-b305-19a256a75286-kube-api-access-ws46q\") pod \"route-controller-manager-ff64d785c-4fbj5\" (UID: \"f9f4ab43-f730-4dd1-b305-19a256a75286\") " pod="openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.640975 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b398e323-335f-48c9-b72b-eed794f32525-proxy-ca-bundles\") pod \"controller-manager-d6b68c7f4-qkgzp\" (UID: \"b398e323-335f-48c9-b72b-eed794f32525\") " pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.641019 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b398e323-335f-48c9-b72b-eed794f32525-config\") pod \"controller-manager-d6b68c7f4-qkgzp\" (UID: \"b398e323-335f-48c9-b72b-eed794f32525\") " pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.641049 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f9f4ab43-f730-4dd1-b305-19a256a75286-client-ca\") pod \"route-controller-manager-ff64d785c-4fbj5\" (UID: \"f9f4ab43-f730-4dd1-b305-19a256a75286\") " 
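
The "SyncLoop (PLEG): event for pod" entries above carry pod lifecycle events relayed from the container runtime, each logged with an ID (the pod UID), a Type, and a Data field (a container or sandbox ID); the surrounding SyncLoop DELETE and SyncLoop REMOVE lines mark, respectively, the API deletion request and the pod object finally disappearing from the API server. A sketch of how a log consumer might model the event triple (field names taken from the entries; the struct itself is illustrative):

    // Field names taken from the log entries; the struct itself is illustrative.
    package pleg

    type Event struct {
        ID   string // pod UID
        Type string // "ContainerStarted", "ContainerDied", ...
        Data string // container or sandbox ID
    }

    func Describe(e Event) string {
        switch e.Type {
        case "ContainerStarted":
            return "container " + e.Data + " started in pod " + e.ID
        case "ContainerDied":
            return "container " + e.Data + " exited in pod " + e.ID
        default:
            return "unhandled PLEG event type " + e.Type
        }
    }
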
pod="openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.641074 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f9f4ab43-f730-4dd1-b305-19a256a75286-serving-cert\") pod \"route-controller-manager-ff64d785c-4fbj5\" (UID: \"f9f4ab43-f730-4dd1-b305-19a256a75286\") " pod="openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.641115 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9f4ab43-f730-4dd1-b305-19a256a75286-config\") pod \"route-controller-manager-ff64d785c-4fbj5\" (UID: \"f9f4ab43-f730-4dd1-b305-19a256a75286\") " pod="openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.641158 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cbwdt\" (UniqueName: \"kubernetes.io/projected/b398e323-335f-48c9-b72b-eed794f32525-kube-api-access-cbwdt\") pod \"controller-manager-d6b68c7f4-qkgzp\" (UID: \"b398e323-335f-48c9-b72b-eed794f32525\") " pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.642301 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f9f4ab43-f730-4dd1-b305-19a256a75286-client-ca\") pod \"route-controller-manager-ff64d785c-4fbj5\" (UID: \"f9f4ab43-f730-4dd1-b305-19a256a75286\") " pod="openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.642456 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b398e323-335f-48c9-b72b-eed794f32525-client-ca\") pod \"controller-manager-d6b68c7f4-qkgzp\" (UID: \"b398e323-335f-48c9-b72b-eed794f32525\") " pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.643330 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b398e323-335f-48c9-b72b-eed794f32525-serving-cert\") pod \"controller-manager-d6b68c7f4-qkgzp\" (UID: \"b398e323-335f-48c9-b72b-eed794f32525\") " pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.643568 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9f4ab43-f730-4dd1-b305-19a256a75286-config\") pod \"route-controller-manager-ff64d785c-4fbj5\" (UID: \"f9f4ab43-f730-4dd1-b305-19a256a75286\") " pod="openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.645017 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b398e323-335f-48c9-b72b-eed794f32525-config\") pod \"controller-manager-d6b68c7f4-qkgzp\" (UID: \"b398e323-335f-48c9-b72b-eed794f32525\") " pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.645668 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/f9f4ab43-f730-4dd1-b305-19a256a75286-serving-cert\") pod \"route-controller-manager-ff64d785c-4fbj5\" (UID: \"f9f4ab43-f730-4dd1-b305-19a256a75286\") " pod="openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.645968 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b398e323-335f-48c9-b72b-eed794f32525-proxy-ca-bundles\") pod \"controller-manager-d6b68c7f4-qkgzp\" (UID: \"b398e323-335f-48c9-b72b-eed794f32525\") " pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.655835 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ws46q\" (UniqueName: \"kubernetes.io/projected/f9f4ab43-f730-4dd1-b305-19a256a75286-kube-api-access-ws46q\") pod \"route-controller-manager-ff64d785c-4fbj5\" (UID: \"f9f4ab43-f730-4dd1-b305-19a256a75286\") " pod="openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.657787 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cbwdt\" (UniqueName: \"kubernetes.io/projected/b398e323-335f-48c9-b72b-eed794f32525-kube-api-access-cbwdt\") pod \"controller-manager-d6b68c7f4-qkgzp\" (UID: \"b398e323-335f-48c9-b72b-eed794f32525\") " pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.801233 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5" Jan 28 12:31:04 crc kubenswrapper[4685]: I0128 12:31:04.810067 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" Jan 28 12:31:05 crc kubenswrapper[4685]: I0128 12:31:05.205918 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp"] Jan 28 12:31:05 crc kubenswrapper[4685]: I0128 12:31:05.244653 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5"] Jan 28 12:31:05 crc kubenswrapper[4685]: W0128 12:31:05.251324 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf9f4ab43_f730_4dd1_b305_19a256a75286.slice/crio-2ad56b40b0f3192e5acd0a5a72c6d8b15461e8f66a06028d149baa94fdd1ca0a WatchSource:0}: Error finding container 2ad56b40b0f3192e5acd0a5a72c6d8b15461e8f66a06028d149baa94fdd1ca0a: Status 404 returned error can't find the container with id 2ad56b40b0f3192e5acd0a5a72c6d8b15461e8f66a06028d149baa94fdd1ca0a Jan 28 12:31:05 crc kubenswrapper[4685]: I0128 12:31:05.581567 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" event={"ID":"b398e323-335f-48c9-b72b-eed794f32525","Type":"ContainerStarted","Data":"48b0e6db80473337544822e26831d6aa5f184590f3df1844bf64f8d85606f96e"} Jan 28 12:31:05 crc kubenswrapper[4685]: I0128 12:31:05.581609 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" event={"ID":"b398e323-335f-48c9-b72b-eed794f32525","Type":"ContainerStarted","Data":"6fde1b1ff896ed35c5d39e9383af27311857ece562a4d6644aa2e85d9aeb9f13"} Jan 28 12:31:05 crc kubenswrapper[4685]: I0128 12:31:05.581855 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" Jan 28 12:31:05 crc kubenswrapper[4685]: I0128 12:31:05.584365 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5" event={"ID":"f9f4ab43-f730-4dd1-b305-19a256a75286","Type":"ContainerStarted","Data":"cfa7e37e40d3f2fdd5f4aa910fd3e65a5f98f219d30acf417bcf671b8fc5decb"} Jan 28 12:31:05 crc kubenswrapper[4685]: I0128 12:31:05.584408 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5" event={"ID":"f9f4ab43-f730-4dd1-b305-19a256a75286","Type":"ContainerStarted","Data":"2ad56b40b0f3192e5acd0a5a72c6d8b15461e8f66a06028d149baa94fdd1ca0a"} Jan 28 12:31:05 crc kubenswrapper[4685]: I0128 12:31:05.584689 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5" Jan 28 12:31:05 crc kubenswrapper[4685]: I0128 12:31:05.589670 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" Jan 28 12:31:05 crc kubenswrapper[4685]: I0128 12:31:05.602602 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-d6b68c7f4-qkgzp" podStartSLOduration=2.6025863190000003 podStartE2EDuration="2.602586319s" podCreationTimestamp="2026-01-28 12:31:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:31:05.599347797 +0000 UTC m=+616.686761632" 
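
The pod_startup_latency_tracker entry just above is directly checkable: both pull timestamps are the zero time (no image pull was needed), so podStartSLOduration reduces to the observed running time minus podCreationTimestamp, 2026-01-28 12:31:05.602586319 minus 12:31:03 = 2.602586319s, matching the logged value (the trailing digits in 2.6025863190000003 are just binary floating-point noise). A minimal reproduction of that arithmetic:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        created := time.Date(2026, time.January, 28, 12, 31, 3, 0, time.UTC)
        observed := time.Date(2026, time.January, 28, 12, 31, 5, 602586319, time.UTC)
        fmt.Println(observed.Sub(created)) // 2.602586319s
    }
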
watchObservedRunningTime="2026-01-28 12:31:05.602586319 +0000 UTC m=+616.690000154" Jan 28 12:31:05 crc kubenswrapper[4685]: I0128 12:31:05.618642 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5" podStartSLOduration=2.618623444 podStartE2EDuration="2.618623444s" podCreationTimestamp="2026-01-28 12:31:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:31:05.616882575 +0000 UTC m=+616.704296420" watchObservedRunningTime="2026-01-28 12:31:05.618623444 +0000 UTC m=+616.706037279" Jan 28 12:31:06 crc kubenswrapper[4685]: I0128 12:31:06.127727 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-ff64d785c-4fbj5" Jan 28 12:31:06 crc kubenswrapper[4685]: I0128 12:31:06.557966 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a297f4a-7dae-4c97-b91c-f9f63b51e838" path="/var/lib/kubelet/pods/4a297f4a-7dae-4c97-b91c-f9f63b51e838/volumes" Jan 28 12:31:06 crc kubenswrapper[4685]: I0128 12:31:06.559309 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="639fc84c-2f84-4a92-90f8-8e83c7155bb8" path="/var/lib/kubelet/pods/639fc84c-2f84-4a92-90f8-8e83c7155bb8/volumes" Jan 28 12:31:07 crc kubenswrapper[4685]: I0128 12:31:07.516066 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" podUID="4b002972-6a89-4a6a-9839-a69e5e9ff3e5" containerName="oauth-openshift" containerID="cri-o://f9eb75d5cc28e59132199f58bf9846bcfee46d5efeb39f7e0e53d96bd43c203d" gracePeriod=15 Jan 28 12:31:07 crc kubenswrapper[4685]: I0128 12:31:07.918657 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.082799 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-service-ca\") pod \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.082911 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-serving-cert\") pod \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.082968 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-router-certs\") pod \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.083009 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7v4x8\" (UniqueName: \"kubernetes.io/projected/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-kube-api-access-7v4x8\") pod \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.083047 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-template-login\") pod \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.083081 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-template-provider-selection\") pod \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.083133 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-ocp-branding-template\") pod \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.083396 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-cliconfig\") pod \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.084136 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "4b002972-6a89-4a6a-9839-a69e5e9ff3e5" (UID: 
"4b002972-6a89-4a6a-9839-a69e5e9ff3e5"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.084312 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "4b002972-6a89-4a6a-9839-a69e5e9ff3e5" (UID: "4b002972-6a89-4a6a-9839-a69e5e9ff3e5"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.084481 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-template-error\") pod \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.085056 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-idp-0-file-data\") pod \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.085105 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-trusted-ca-bundle\") pod \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.085135 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-session\") pod \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.085217 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-audit-policies\") pod \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.085287 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-audit-dir\") pod \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\" (UID: \"4b002972-6a89-4a6a-9839-a69e5e9ff3e5\") " Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.085647 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.085671 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.085998 4685 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "4b002972-6a89-4a6a-9839-a69e5e9ff3e5" (UID: "4b002972-6a89-4a6a-9839-a69e5e9ff3e5"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.086264 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "4b002972-6a89-4a6a-9839-a69e5e9ff3e5" (UID: "4b002972-6a89-4a6a-9839-a69e5e9ff3e5"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.086712 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "4b002972-6a89-4a6a-9839-a69e5e9ff3e5" (UID: "4b002972-6a89-4a6a-9839-a69e5e9ff3e5"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.090660 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-kube-api-access-7v4x8" (OuterVolumeSpecName: "kube-api-access-7v4x8") pod "4b002972-6a89-4a6a-9839-a69e5e9ff3e5" (UID: "4b002972-6a89-4a6a-9839-a69e5e9ff3e5"). InnerVolumeSpecName "kube-api-access-7v4x8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.092030 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "4b002972-6a89-4a6a-9839-a69e5e9ff3e5" (UID: "4b002972-6a89-4a6a-9839-a69e5e9ff3e5"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.093313 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "4b002972-6a89-4a6a-9839-a69e5e9ff3e5" (UID: "4b002972-6a89-4a6a-9839-a69e5e9ff3e5"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.093723 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "4b002972-6a89-4a6a-9839-a69e5e9ff3e5" (UID: "4b002972-6a89-4a6a-9839-a69e5e9ff3e5"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.093749 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "4b002972-6a89-4a6a-9839-a69e5e9ff3e5" (UID: "4b002972-6a89-4a6a-9839-a69e5e9ff3e5"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.093991 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "4b002972-6a89-4a6a-9839-a69e5e9ff3e5" (UID: "4b002972-6a89-4a6a-9839-a69e5e9ff3e5"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.094369 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "4b002972-6a89-4a6a-9839-a69e5e9ff3e5" (UID: "4b002972-6a89-4a6a-9839-a69e5e9ff3e5"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.095569 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "4b002972-6a89-4a6a-9839-a69e5e9ff3e5" (UID: "4b002972-6a89-4a6a-9839-a69e5e9ff3e5"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.098594 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "4b002972-6a89-4a6a-9839-a69e5e9ff3e5" (UID: "4b002972-6a89-4a6a-9839-a69e5e9ff3e5"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.187538 4685 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.187585 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.187602 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7v4x8\" (UniqueName: \"kubernetes.io/projected/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-kube-api-access-7v4x8\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.187617 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.187635 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.187649 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.187664 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.187679 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.187691 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.187707 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.187719 4685 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.187732 4685 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/4b002972-6a89-4a6a-9839-a69e5e9ff3e5-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.610886 4685 generic.go:334] "Generic (PLEG): container finished" podID="4b002972-6a89-4a6a-9839-a69e5e9ff3e5" containerID="f9eb75d5cc28e59132199f58bf9846bcfee46d5efeb39f7e0e53d96bd43c203d" exitCode=0 Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.610934 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" event={"ID":"4b002972-6a89-4a6a-9839-a69e5e9ff3e5","Type":"ContainerDied","Data":"f9eb75d5cc28e59132199f58bf9846bcfee46d5efeb39f7e0e53d96bd43c203d"} Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.610964 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" event={"ID":"4b002972-6a89-4a6a-9839-a69e5e9ff3e5","Type":"ContainerDied","Data":"8fc38eb5ca5dea78125ca1e8c5d30d5b4cba1772038fa9a467188c194f6250f2"} Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.610984 4685 scope.go:117] "RemoveContainer" containerID="f9eb75d5cc28e59132199f58bf9846bcfee46d5efeb39f7e0e53d96bd43c203d" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.611417 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4gdsr" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.631868 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4gdsr"] Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.637544 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4gdsr"] Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.641617 4685 scope.go:117] "RemoveContainer" containerID="f9eb75d5cc28e59132199f58bf9846bcfee46d5efeb39f7e0e53d96bd43c203d" Jan 28 12:31:08 crc kubenswrapper[4685]: E0128 12:31:08.642080 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9eb75d5cc28e59132199f58bf9846bcfee46d5efeb39f7e0e53d96bd43c203d\": container with ID starting with f9eb75d5cc28e59132199f58bf9846bcfee46d5efeb39f7e0e53d96bd43c203d not found: ID does not exist" containerID="f9eb75d5cc28e59132199f58bf9846bcfee46d5efeb39f7e0e53d96bd43c203d" Jan 28 12:31:08 crc kubenswrapper[4685]: I0128 12:31:08.642110 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9eb75d5cc28e59132199f58bf9846bcfee46d5efeb39f7e0e53d96bd43c203d"} err="failed to get container status \"f9eb75d5cc28e59132199f58bf9846bcfee46d5efeb39f7e0e53d96bd43c203d\": rpc error: code = NotFound desc = could not find container \"f9eb75d5cc28e59132199f58bf9846bcfee46d5efeb39f7e0e53d96bd43c203d\": container with ID starting with f9eb75d5cc28e59132199f58bf9846bcfee46d5efeb39f7e0e53d96bd43c203d not found: ID does not exist" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.474347 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-54b998df4d-jpqtt"] Jan 28 12:31:10 crc kubenswrapper[4685]: E0128 12:31:10.475429 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b002972-6a89-4a6a-9839-a69e5e9ff3e5" containerName="oauth-openshift" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.475466 4685 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="4b002972-6a89-4a6a-9839-a69e5e9ff3e5" containerName="oauth-openshift" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.475785 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b002972-6a89-4a6a-9839-a69e5e9ff3e5" containerName="oauth-openshift" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.476389 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.479150 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.486490 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.486779 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.487032 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.487196 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.487477 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.487647 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.487777 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.487949 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.487998 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.490905 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.518870 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.519908 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.520433 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-user-template-error\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.520480 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.520507 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/734e6553-7b86-4a1f-838c-eb675e984238-audit-policies\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.520543 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-service-ca\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.520638 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-router-certs\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.520713 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-serving-cert\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.520751 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.520777 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.520809 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-user-template-login\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " 
pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.520831 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-session\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.520872 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/734e6553-7b86-4a1f-838c-eb675e984238-audit-dir\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.520909 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.520955 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnw69\" (UniqueName: \"kubernetes.io/projected/734e6553-7b86-4a1f-838c-eb675e984238-kube-api-access-dnw69\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.520989 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-cliconfig\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.522265 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.525088 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-54b998df4d-jpqtt"] Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.530639 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.555378 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b002972-6a89-4a6a-9839-a69e5e9ff3e5" path="/var/lib/kubelet/pods/4b002972-6a89-4a6a-9839-a69e5e9ff3e5/volumes" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.622446 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-cliconfig\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " 
pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.622518 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-user-template-error\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.622546 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.622565 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/734e6553-7b86-4a1f-838c-eb675e984238-audit-policies\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.622590 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-service-ca\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.622605 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-router-certs\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.622627 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-serving-cert\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.622652 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.622678 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: 
\"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.622703 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-user-template-login\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.622729 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-session\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.622758 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/734e6553-7b86-4a1f-838c-eb675e984238-audit-dir\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.622785 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.622805 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnw69\" (UniqueName: \"kubernetes.io/projected/734e6553-7b86-4a1f-838c-eb675e984238-kube-api-access-dnw69\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.623908 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/734e6553-7b86-4a1f-838c-eb675e984238-audit-dir\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.624530 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.624859 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-service-ca\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc 
kubenswrapper[4685]: I0128 12:31:10.625402 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/734e6553-7b86-4a1f-838c-eb675e984238-audit-policies\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.625708 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-cliconfig\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.627380 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-user-template-error\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.630349 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.630539 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.632450 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-router-certs\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.632459 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-serving-cert\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.633111 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.633730 4685 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-user-template-login\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt"
Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.634804 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/734e6553-7b86-4a1f-838c-eb675e984238-v4-0-config-system-session\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt"
Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.661885 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnw69\" (UniqueName: \"kubernetes.io/projected/734e6553-7b86-4a1f-838c-eb675e984238-kube-api-access-dnw69\") pod \"oauth-openshift-54b998df4d-jpqtt\" (UID: \"734e6553-7b86-4a1f-838c-eb675e984238\") " pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt"
Jan 28 12:31:10 crc kubenswrapper[4685]: I0128 12:31:10.824790 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt"
Jan 28 12:31:11 crc kubenswrapper[4685]: I0128 12:31:11.267555 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-54b998df4d-jpqtt"]
Jan 28 12:31:11 crc kubenswrapper[4685]: W0128 12:31:11.275841 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod734e6553_7b86_4a1f_838c_eb675e984238.slice/crio-20ccb24bbfde31f9b5d7af7032fe2b860480f60b17919715c78adb5ca9536a57 WatchSource:0}: Error finding container 20ccb24bbfde31f9b5d7af7032fe2b860480f60b17919715c78adb5ca9536a57: Status 404 returned error can't find the container with id 20ccb24bbfde31f9b5d7af7032fe2b860480f60b17919715c78adb5ca9536a57
Jan 28 12:31:11 crc kubenswrapper[4685]: I0128 12:31:11.635995 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" event={"ID":"734e6553-7b86-4a1f-838c-eb675e984238","Type":"ContainerStarted","Data":"20ccb24bbfde31f9b5d7af7032fe2b860480f60b17919715c78adb5ca9536a57"}
Jan 28 12:31:12 crc kubenswrapper[4685]: I0128 12:31:12.643601 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" event={"ID":"734e6553-7b86-4a1f-838c-eb675e984238","Type":"ContainerStarted","Data":"c3bd2f43863f7f17012c065f0556ca930fa97e422319f9c09108690902dd6f65"}
Jan 28 12:31:12 crc kubenswrapper[4685]: I0128 12:31:12.644785 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt"
Jan 28 12:31:12 crc kubenswrapper[4685]: I0128 12:31:12.651424 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt"
Jan 28 12:31:12 crc kubenswrapper[4685]: I0128 12:31:12.676776 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-54b998df4d-jpqtt" podStartSLOduration=30.676742688 podStartE2EDuration="30.676742688s" podCreationTimestamp="2026-01-28 12:30:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:31:12.669691977 +0000 UTC m=+623.757105822" watchObservedRunningTime="2026-01-28 12:31:12.676742688 +0000 UTC m=+623.764156523"
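The startup-latency entry above is plain arithmetic: podStartSLOduration is observedRunningTime minus podCreationTimestamp (12:30:42 to 12:31:12.676742688, i.e. 30.676742688s), with any image-pull window subtracted. Here firstStartedPulling/lastFinishedPulling are zero values because the images were already on the node, so nothing is subtracted and the SLO and E2E durations coincide. A minimal sketch of that computation (hypothetical helper name, not the kubelet's pod_startup_latency_tracker itself):

    package main

    import (
        "fmt"
        "time"
    )

    // startSLODuration mimics the reported metric: time from pod creation to
    // the first observed Running state, minus time spent pulling images.
    // Zero-valued pull timestamps (images already cached) contribute nothing.
    func startSLODuration(created, observedRunning, firstPull, lastPull time.Time) time.Duration {
        d := observedRunning.Sub(created)
        if !firstPull.IsZero() && !lastPull.IsZero() {
            d -= lastPull.Sub(firstPull)
        }
        return d
    }

    func main() {
        created, _ := time.Parse(time.RFC3339, "2026-01-28T12:30:42Z")
        running, _ := time.Parse(time.RFC3339Nano, "2026-01-28T12:31:12.676742688Z")
        // Prints 30.676742688s, matching podStartSLOduration above.
        fmt.Println(startSLODuration(created, running, time.Time{}, time.Time{}))
    }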
Jan 28 12:31:22 crc kubenswrapper[4685]: I0128 12:31:22.786687 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9q8q2"]
Jan 28 12:31:22 crc kubenswrapper[4685]: I0128 12:31:22.787558 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9q8q2" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" containerName="registry-server" containerID="cri-o://a98be9e5af6900c610be2904f219404b02c4c12e10e4ed3af243b289ce6da76b" gracePeriod=30
Jan 28 12:31:22 crc kubenswrapper[4685]: I0128 12:31:22.806460 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-924cb"]
Jan 28 12:31:22 crc kubenswrapper[4685]: I0128 12:31:22.806753 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-924cb" podUID="cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" containerName="registry-server" containerID="cri-o://03de7b34e3d85908ae1ef4b1a73ddc86f7d8fcfffbd239d2f862a007ed29de79" gracePeriod=30
Jan 28 12:31:22 crc kubenswrapper[4685]: I0128 12:31:22.809815 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qpc29"]
Jan 28 12:31:22 crc kubenswrapper[4685]: I0128 12:31:22.809970 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerName="marketplace-operator" containerID="cri-o://e930805a53fabf004af5c8560f837c99b859d572f1018add50bd973e93fb06f1" gracePeriod=30
Jan 28 12:31:22 crc kubenswrapper[4685]: I0128 12:31:22.824982 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8cf72"]
Jan 28 12:31:22 crc kubenswrapper[4685]: I0128 12:31:22.825229 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8cf72" podUID="424bdf45-4dcb-4b13-b68f-e55e115238bb" containerName="registry-server" containerID="cri-o://e752342182b5e9b20834b5baf524907384a8365cc36be08ac75ac918cef53c0d" gracePeriod=30
Jan 28 12:31:22 crc kubenswrapper[4685]: I0128 12:31:22.840939 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-7qzcb"]
Jan 28 12:31:22 crc kubenswrapper[4685]: I0128 12:31:22.841677 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-7qzcb"
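Each "SyncLoop DELETE" above is immediately followed by "Killing container with a grace period" with gracePeriod=30: the runtime gets up to 30 seconds to stop the container cleanly before it is killed forcibly, and the replacement marketplace-operator pod (...-7qzcb) is ADDed while the old one is still terminating. The escalation pattern, sketched at the plain-process level (illustrative only; the kubelet drives this through the CRI StopContainer call, not os/exec, and the sketch assumes a Unix platform):

    package main

    import (
        "fmt"
        "os/exec"
        "syscall"
        "time"
    )

    // stopGracefully sends SIGTERM, waits up to gracePeriod, then force-kills,
    // mirroring the gracePeriod=30 escalation logged above.
    func stopGracefully(cmd *exec.Cmd, gracePeriod time.Duration) error {
        if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
            return err
        }
        done := make(chan error, 1)
        go func() { done <- cmd.Wait() }()
        select {
        case err := <-done:
            return err // exited within the grace period
        case <-time.After(gracePeriod):
            return cmd.Process.Kill() // deadline passed: SIGKILL
        }
    }

    func main() {
        cmd := exec.Command("sleep", "60")
        if err := cmd.Start(); err != nil {
            panic(err)
        }
        fmt.Println(stopGracefully(cmd, 2*time.Second))
    }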
Jan 28 12:31:22 crc kubenswrapper[4685]: I0128 12:31:22.847482 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wl9lt"]
Jan 28 12:31:22 crc kubenswrapper[4685]: I0128 12:31:22.847772 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wl9lt" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" containerName="registry-server" containerID="cri-o://6518f209e38815a3951d44c5d8f748037065a21fc8667d52d2610a6024a15815" gracePeriod=30
Jan 28 12:31:22 crc kubenswrapper[4685]: I0128 12:31:22.854283 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-7qzcb"]
Jan 28 12:31:22 crc kubenswrapper[4685]: I0128 12:31:22.981545 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e4297ea9-021a-467f-b78b-89ba6ae5a6b1-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-7qzcb\" (UID: \"e4297ea9-021a-467f-b78b-89ba6ae5a6b1\") " pod="openshift-marketplace/marketplace-operator-79b997595-7qzcb"
Jan 28 12:31:22 crc kubenswrapper[4685]: I0128 12:31:22.981611 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e4297ea9-021a-467f-b78b-89ba6ae5a6b1-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-7qzcb\" (UID: \"e4297ea9-021a-467f-b78b-89ba6ae5a6b1\") " pod="openshift-marketplace/marketplace-operator-79b997595-7qzcb"
Jan 28 12:31:22 crc kubenswrapper[4685]: I0128 12:31:22.981729 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmgff\" (UniqueName: \"kubernetes.io/projected/e4297ea9-021a-467f-b78b-89ba6ae5a6b1-kube-api-access-qmgff\") pod \"marketplace-operator-79b997595-7qzcb\" (UID: \"e4297ea9-021a-467f-b78b-89ba6ae5a6b1\") " pod="openshift-marketplace/marketplace-operator-79b997595-7qzcb"
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.082770 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmgff\" (UniqueName: \"kubernetes.io/projected/e4297ea9-021a-467f-b78b-89ba6ae5a6b1-kube-api-access-qmgff\") pod \"marketplace-operator-79b997595-7qzcb\" (UID: \"e4297ea9-021a-467f-b78b-89ba6ae5a6b1\") " pod="openshift-marketplace/marketplace-operator-79b997595-7qzcb"
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.082875 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e4297ea9-021a-467f-b78b-89ba6ae5a6b1-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-7qzcb\" (UID: \"e4297ea9-021a-467f-b78b-89ba6ae5a6b1\") " pod="openshift-marketplace/marketplace-operator-79b997595-7qzcb"
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.082907 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e4297ea9-021a-467f-b78b-89ba6ae5a6b1-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-7qzcb\" (UID: \"e4297ea9-021a-467f-b78b-89ba6ae5a6b1\") " pod="openshift-marketplace/marketplace-operator-79b997595-7qzcb"
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.084549 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e4297ea9-021a-467f-b78b-89ba6ae5a6b1-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-7qzcb\" (UID: \"e4297ea9-021a-467f-b78b-89ba6ae5a6b1\") " pod="openshift-marketplace/marketplace-operator-79b997595-7qzcb"
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.089949 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e4297ea9-021a-467f-b78b-89ba6ae5a6b1-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-7qzcb\" (UID: \"e4297ea9-021a-467f-b78b-89ba6ae5a6b1\") " pod="openshift-marketplace/marketplace-operator-79b997595-7qzcb"
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.100449 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmgff\" (UniqueName: \"kubernetes.io/projected/e4297ea9-021a-467f-b78b-89ba6ae5a6b1-kube-api-access-qmgff\") pod \"marketplace-operator-79b997595-7qzcb\" (UID: \"e4297ea9-021a-467f-b78b-89ba6ae5a6b1\") " pod="openshift-marketplace/marketplace-operator-79b997595-7qzcb"
Jan 28 12:31:23 crc kubenswrapper[4685]: E0128 12:31:23.129071 4685 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e752342182b5e9b20834b5baf524907384a8365cc36be08ac75ac918cef53c0d is running failed: container process not found" containerID="e752342182b5e9b20834b5baf524907384a8365cc36be08ac75ac918cef53c0d" cmd=["grpc_health_probe","-addr=:50051"]
Jan 28 12:31:23 crc kubenswrapper[4685]: E0128 12:31:23.129598 4685 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e752342182b5e9b20834b5baf524907384a8365cc36be08ac75ac918cef53c0d is running failed: container process not found" containerID="e752342182b5e9b20834b5baf524907384a8365cc36be08ac75ac918cef53c0d" cmd=["grpc_health_probe","-addr=:50051"]
Jan 28 12:31:23 crc kubenswrapper[4685]: E0128 12:31:23.130202 4685 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e752342182b5e9b20834b5baf524907384a8365cc36be08ac75ac918cef53c0d is running failed: container process not found" containerID="e752342182b5e9b20834b5baf524907384a8365cc36be08ac75ac918cef53c0d" cmd=["grpc_health_probe","-addr=:50051"]
Jan 28 12:31:23 crc kubenswrapper[4685]: E0128 12:31:23.130236 4685 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e752342182b5e9b20834b5baf524907384a8365cc36be08ac75ac918cef53c0d is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-8cf72" podUID="424bdf45-4dcb-4b13-b68f-e55e115238bb" containerName="registry-server"
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.175683 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-7qzcb"
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.346835 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-924cb"
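The three "ExecSync cmd from runtime service failed ... NotFound" errors are the readiness probe racing the termination ordered at 12:31:22: the kubelet execs grpc_health_probe inside a container whose process is already gone, so the exec fails and the prober records "Probe errored". grpc_health_probe itself issues a standard gRPC health check; an equivalent client call looks roughly like this (a sketch assuming recent grpc-go, where grpc.NewClient supersedes grpc.Dial, and a server exposing grpc.health.v1 on :50051):

    package main

    import (
        "context"
        "fmt"
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
        healthpb "google.golang.org/grpc/health/grpc_health_v1"
    )

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()
        conn, err := grpc.NewClient("localhost:50051",
            grpc.WithTransportCredentials(insecure.NewCredentials()))
        if err != nil {
            panic(err)
        }
        defer conn.Close()
        // Empty Service name checks overall server health, as the probe does.
        resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
        if err != nil {
            fmt.Println("probe failed:", err) // what the kubelet surfaces above
            return
        }
        fmt.Println("status:", resp.GetStatus()) // SERVING => ready
    }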
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.388952 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb-catalog-content\") pod \"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb\" (UID: \"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb\") "
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.388994 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8m5z\" (UniqueName: \"kubernetes.io/projected/cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb-kube-api-access-f8m5z\") pod \"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb\" (UID: \"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb\") "
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.389027 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb-utilities\") pod \"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb\" (UID: \"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb\") "
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.390010 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb-utilities" (OuterVolumeSpecName: "utilities") pod "cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" (UID: "cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.395085 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb-kube-api-access-f8m5z" (OuterVolumeSpecName: "kube-api-access-f8m5z") pod "cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" (UID: "cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb"). InnerVolumeSpecName "kube-api-access-f8m5z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.458397 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" (UID: "cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.473830 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-qpc29_68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45/marketplace-operator/3.log"
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.474022 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29"
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.492175 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.492249 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8m5z\" (UniqueName: \"kubernetes.io/projected/cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb-kube-api-access-f8m5z\") on node \"crc\" DevicePath \"\""
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.492264 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.538490 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9q8q2"
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.543818 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wl9lt"
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.554790 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8cf72"
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.593185 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45-marketplace-operator-metrics\") pod \"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45\" (UID: \"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45\") "
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.593227 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xkg7l\" (UniqueName: \"kubernetes.io/projected/68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45-kube-api-access-xkg7l\") pod \"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45\" (UID: \"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45\") "
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.593250 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45-marketplace-trusted-ca\") pod \"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45\" (UID: \"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45\") "
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.594021 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" (UID: "68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.595938 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" (UID: "68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue ""
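Pod deletion runs the volume flow in reverse: "UnmountVolume started" for each volume still mounted for a deleted pod, a per-plugin TearDown (empty-dir, configmap, secret, projected), and finally "Volume detached ... DevicePath \"\"" once the volume drops out of the node's actual state of world. The reconcile shape, compressed into a sketch with hypothetical types standing in for the kubelet's desired/actual state caches (not the real reconciler):

    package main

    import "fmt"

    // volume is a hypothetical stand-in for a (volume, pod) entry in the
    // actual state of world.
    type volume struct{ name, pod string }

    type unmounter interface{ TearDown(v volume) error }

    // reconcileUnmounts unmounts every volume still present in the actual
    // state but absent from the desired state (its pod was deleted), then
    // marks it detached.
    func reconcileUnmounts(actual []volume, desired map[volume]bool, u unmounter) {
        for _, v := range actual {
            if desired[v] {
                continue // still wanted: leave it mounted
            }
            fmt.Printf("UnmountVolume started for %q pod %q\n", v.name, v.pod)
            if err := u.TearDown(v); err != nil {
                fmt.Println("teardown failed, will retry:", err)
                continue
            }
            fmt.Printf("Volume detached for %q\n", v.name)
        }
    }

    type noop struct{}

    func (noop) TearDown(volume) error { return nil }

    func main() {
        actual := []volume{{"catalog-content", "community-operators-924cb"}}
        reconcileUnmounts(actual, map[volume]bool{}, noop{})
    }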
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.596262 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45-kube-api-access-xkg7l" (OuterVolumeSpecName: "kube-api-access-xkg7l") pod "68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" (UID: "68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45"). InnerVolumeSpecName "kube-api-access-xkg7l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.694753 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2rdrn\" (UniqueName: \"kubernetes.io/projected/424bdf45-4dcb-4b13-b68f-e55e115238bb-kube-api-access-2rdrn\") pod \"424bdf45-4dcb-4b13-b68f-e55e115238bb\" (UID: \"424bdf45-4dcb-4b13-b68f-e55e115238bb\") "
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.694803 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e68fe67b-133d-4474-8f6d-a781bca954d7-catalog-content\") pod \"e68fe67b-133d-4474-8f6d-a781bca954d7\" (UID: \"e68fe67b-133d-4474-8f6d-a781bca954d7\") "
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.694841 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/424bdf45-4dcb-4b13-b68f-e55e115238bb-catalog-content\") pod \"424bdf45-4dcb-4b13-b68f-e55e115238bb\" (UID: \"424bdf45-4dcb-4b13-b68f-e55e115238bb\") "
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.694873 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d93a170-5ad3-489b-b3be-7e3cc4201970-utilities\") pod \"8d93a170-5ad3-489b-b3be-7e3cc4201970\" (UID: \"8d93a170-5ad3-489b-b3be-7e3cc4201970\") "
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.694894 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e68fe67b-133d-4474-8f6d-a781bca954d7-utilities\") pod \"e68fe67b-133d-4474-8f6d-a781bca954d7\" (UID: \"e68fe67b-133d-4474-8f6d-a781bca954d7\") "
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.694932 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6bjln\" (UniqueName: \"kubernetes.io/projected/e68fe67b-133d-4474-8f6d-a781bca954d7-kube-api-access-6bjln\") pod \"e68fe67b-133d-4474-8f6d-a781bca954d7\" (UID: \"e68fe67b-133d-4474-8f6d-a781bca954d7\") "
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.694950 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d93a170-5ad3-489b-b3be-7e3cc4201970-catalog-content\") pod \"8d93a170-5ad3-489b-b3be-7e3cc4201970\" (UID: \"8d93a170-5ad3-489b-b3be-7e3cc4201970\") "
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.694966 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/424bdf45-4dcb-4b13-b68f-e55e115238bb-utilities\") pod \"424bdf45-4dcb-4b13-b68f-e55e115238bb\" (UID: \"424bdf45-4dcb-4b13-b68f-e55e115238bb\") "
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.694982 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8mqdw\" (UniqueName: 
\"kubernetes.io/projected/8d93a170-5ad3-489b-b3be-7e3cc4201970-kube-api-access-8mqdw\") pod \"8d93a170-5ad3-489b-b3be-7e3cc4201970\" (UID: \"8d93a170-5ad3-489b-b3be-7e3cc4201970\") " Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.695154 4685 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.695180 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xkg7l\" (UniqueName: \"kubernetes.io/projected/68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45-kube-api-access-xkg7l\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.695192 4685 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.695881 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d93a170-5ad3-489b-b3be-7e3cc4201970-utilities" (OuterVolumeSpecName: "utilities") pod "8d93a170-5ad3-489b-b3be-7e3cc4201970" (UID: "8d93a170-5ad3-489b-b3be-7e3cc4201970"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.696462 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e68fe67b-133d-4474-8f6d-a781bca954d7-utilities" (OuterVolumeSpecName: "utilities") pod "e68fe67b-133d-4474-8f6d-a781bca954d7" (UID: "e68fe67b-133d-4474-8f6d-a781bca954d7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.697049 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/424bdf45-4dcb-4b13-b68f-e55e115238bb-utilities" (OuterVolumeSpecName: "utilities") pod "424bdf45-4dcb-4b13-b68f-e55e115238bb" (UID: "424bdf45-4dcb-4b13-b68f-e55e115238bb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.698450 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e68fe67b-133d-4474-8f6d-a781bca954d7-kube-api-access-6bjln" (OuterVolumeSpecName: "kube-api-access-6bjln") pod "e68fe67b-133d-4474-8f6d-a781bca954d7" (UID: "e68fe67b-133d-4474-8f6d-a781bca954d7"). InnerVolumeSpecName "kube-api-access-6bjln". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.698522 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d93a170-5ad3-489b-b3be-7e3cc4201970-kube-api-access-8mqdw" (OuterVolumeSpecName: "kube-api-access-8mqdw") pod "8d93a170-5ad3-489b-b3be-7e3cc4201970" (UID: "8d93a170-5ad3-489b-b3be-7e3cc4201970"). InnerVolumeSpecName "kube-api-access-8mqdw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.698885 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/424bdf45-4dcb-4b13-b68f-e55e115238bb-kube-api-access-2rdrn" (OuterVolumeSpecName: "kube-api-access-2rdrn") pod "424bdf45-4dcb-4b13-b68f-e55e115238bb" (UID: "424bdf45-4dcb-4b13-b68f-e55e115238bb"). InnerVolumeSpecName "kube-api-access-2rdrn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.705871 4685 generic.go:334] "Generic (PLEG): container finished" podID="cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" containerID="03de7b34e3d85908ae1ef4b1a73ddc86f7d8fcfffbd239d2f862a007ed29de79" exitCode=0 Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.706100 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-924cb" event={"ID":"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb","Type":"ContainerDied","Data":"03de7b34e3d85908ae1ef4b1a73ddc86f7d8fcfffbd239d2f862a007ed29de79"} Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.706290 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-924cb" event={"ID":"cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb","Type":"ContainerDied","Data":"918f0e08f6766c49518b045eb18a6059f0d184d963cb3daa1adfd03ecfe865ea"} Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.706458 4685 scope.go:117] "RemoveContainer" containerID="03de7b34e3d85908ae1ef4b1a73ddc86f7d8fcfffbd239d2f862a007ed29de79" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.706468 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-924cb" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.709063 4685 generic.go:334] "Generic (PLEG): container finished" podID="424bdf45-4dcb-4b13-b68f-e55e115238bb" containerID="e752342182b5e9b20834b5baf524907384a8365cc36be08ac75ac918cef53c0d" exitCode=0 Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.709123 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8cf72" event={"ID":"424bdf45-4dcb-4b13-b68f-e55e115238bb","Type":"ContainerDied","Data":"e752342182b5e9b20834b5baf524907384a8365cc36be08ac75ac918cef53c0d"} Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.709143 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8cf72" event={"ID":"424bdf45-4dcb-4b13-b68f-e55e115238bb","Type":"ContainerDied","Data":"0d50f9d6fe807b9be40093668486b29f1b3403426ea9ba079cd10c933db35e27"} Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.709223 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8cf72" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.713614 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-qpc29_68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45/marketplace-operator/3.log" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.713685 4685 generic.go:334] "Generic (PLEG): container finished" podID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerID="e930805a53fabf004af5c8560f837c99b859d572f1018add50bd973e93fb06f1" exitCode=0 Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.713758 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.713761 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" event={"ID":"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45","Type":"ContainerDied","Data":"e930805a53fabf004af5c8560f837c99b859d572f1018add50bd973e93fb06f1"} Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.713878 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qpc29" event={"ID":"68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45","Type":"ContainerDied","Data":"8764eec28c9522ea30e1bd7d699ac9753a20358c5ea924784f39ee0e7761e885"} Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.729250 4685 generic.go:334] "Generic (PLEG): container finished" podID="8d93a170-5ad3-489b-b3be-7e3cc4201970" containerID="a98be9e5af6900c610be2904f219404b02c4c12e10e4ed3af243b289ce6da76b" exitCode=0 Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.729391 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9q8q2" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.730604 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9q8q2" event={"ID":"8d93a170-5ad3-489b-b3be-7e3cc4201970","Type":"ContainerDied","Data":"a98be9e5af6900c610be2904f219404b02c4c12e10e4ed3af243b289ce6da76b"} Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.734251 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9q8q2" event={"ID":"8d93a170-5ad3-489b-b3be-7e3cc4201970","Type":"ContainerDied","Data":"55f3b0c9d15a20d69ddfb8460e50ace0d6be50d7231959400186c86ffd4af39b"} Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.736636 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/424bdf45-4dcb-4b13-b68f-e55e115238bb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "424bdf45-4dcb-4b13-b68f-e55e115238bb" (UID: "424bdf45-4dcb-4b13-b68f-e55e115238bb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.743039 4685 generic.go:334] "Generic (PLEG): container finished" podID="e68fe67b-133d-4474-8f6d-a781bca954d7" containerID="6518f209e38815a3951d44c5d8f748037065a21fc8667d52d2610a6024a15815" exitCode=0 Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.743089 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wl9lt" event={"ID":"e68fe67b-133d-4474-8f6d-a781bca954d7","Type":"ContainerDied","Data":"6518f209e38815a3951d44c5d8f748037065a21fc8667d52d2610a6024a15815"} Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.743139 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wl9lt" event={"ID":"e68fe67b-133d-4474-8f6d-a781bca954d7","Type":"ContainerDied","Data":"af175ec1973081f50e23fa6ed31f3e80a857d9999a0dc987e18dd26963641541"} Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.743223 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wl9lt" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.758011 4685 scope.go:117] "RemoveContainer" containerID="c46783e3879d575621e86df988fda5b3816abe198b66239ad4948047f1cb1b2c" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.760054 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qpc29"] Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.768894 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qpc29"] Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.773083 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-924cb"] Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.778387 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-924cb"] Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.779416 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-7qzcb"] Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.782684 4685 scope.go:117] "RemoveContainer" containerID="2ce337ce2726e42df63521ff2089593518785fdc7d0bc0ffcd114b38f8966b10" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.789681 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d93a170-5ad3-489b-b3be-7e3cc4201970-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8d93a170-5ad3-489b-b3be-7e3cc4201970" (UID: "8d93a170-5ad3-489b-b3be-7e3cc4201970"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.796286 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d93a170-5ad3-489b-b3be-7e3cc4201970-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.796534 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e68fe67b-133d-4474-8f6d-a781bca954d7-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.796544 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6bjln\" (UniqueName: \"kubernetes.io/projected/e68fe67b-133d-4474-8f6d-a781bca954d7-kube-api-access-6bjln\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.796553 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d93a170-5ad3-489b-b3be-7e3cc4201970-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.796561 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/424bdf45-4dcb-4b13-b68f-e55e115238bb-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.796572 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8mqdw\" (UniqueName: \"kubernetes.io/projected/8d93a170-5ad3-489b-b3be-7e3cc4201970-kube-api-access-8mqdw\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.796580 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2rdrn\" (UniqueName: 
\"kubernetes.io/projected/424bdf45-4dcb-4b13-b68f-e55e115238bb-kube-api-access-2rdrn\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.796588 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/424bdf45-4dcb-4b13-b68f-e55e115238bb-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.801422 4685 scope.go:117] "RemoveContainer" containerID="03de7b34e3d85908ae1ef4b1a73ddc86f7d8fcfffbd239d2f862a007ed29de79" Jan 28 12:31:23 crc kubenswrapper[4685]: E0128 12:31:23.801790 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03de7b34e3d85908ae1ef4b1a73ddc86f7d8fcfffbd239d2f862a007ed29de79\": container with ID starting with 03de7b34e3d85908ae1ef4b1a73ddc86f7d8fcfffbd239d2f862a007ed29de79 not found: ID does not exist" containerID="03de7b34e3d85908ae1ef4b1a73ddc86f7d8fcfffbd239d2f862a007ed29de79" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.801814 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03de7b34e3d85908ae1ef4b1a73ddc86f7d8fcfffbd239d2f862a007ed29de79"} err="failed to get container status \"03de7b34e3d85908ae1ef4b1a73ddc86f7d8fcfffbd239d2f862a007ed29de79\": rpc error: code = NotFound desc = could not find container \"03de7b34e3d85908ae1ef4b1a73ddc86f7d8fcfffbd239d2f862a007ed29de79\": container with ID starting with 03de7b34e3d85908ae1ef4b1a73ddc86f7d8fcfffbd239d2f862a007ed29de79 not found: ID does not exist" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.801834 4685 scope.go:117] "RemoveContainer" containerID="c46783e3879d575621e86df988fda5b3816abe198b66239ad4948047f1cb1b2c" Jan 28 12:31:23 crc kubenswrapper[4685]: E0128 12:31:23.802064 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c46783e3879d575621e86df988fda5b3816abe198b66239ad4948047f1cb1b2c\": container with ID starting with c46783e3879d575621e86df988fda5b3816abe198b66239ad4948047f1cb1b2c not found: ID does not exist" containerID="c46783e3879d575621e86df988fda5b3816abe198b66239ad4948047f1cb1b2c" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.802087 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c46783e3879d575621e86df988fda5b3816abe198b66239ad4948047f1cb1b2c"} err="failed to get container status \"c46783e3879d575621e86df988fda5b3816abe198b66239ad4948047f1cb1b2c\": rpc error: code = NotFound desc = could not find container \"c46783e3879d575621e86df988fda5b3816abe198b66239ad4948047f1cb1b2c\": container with ID starting with c46783e3879d575621e86df988fda5b3816abe198b66239ad4948047f1cb1b2c not found: ID does not exist" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.802104 4685 scope.go:117] "RemoveContainer" containerID="2ce337ce2726e42df63521ff2089593518785fdc7d0bc0ffcd114b38f8966b10" Jan 28 12:31:23 crc kubenswrapper[4685]: E0128 12:31:23.802302 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ce337ce2726e42df63521ff2089593518785fdc7d0bc0ffcd114b38f8966b10\": container with ID starting with 2ce337ce2726e42df63521ff2089593518785fdc7d0bc0ffcd114b38f8966b10 not found: ID does not exist" containerID="2ce337ce2726e42df63521ff2089593518785fdc7d0bc0ffcd114b38f8966b10" Jan 28 12:31:23 crc kubenswrapper[4685]: 
I0128 12:31:23.802320 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ce337ce2726e42df63521ff2089593518785fdc7d0bc0ffcd114b38f8966b10"} err="failed to get container status \"2ce337ce2726e42df63521ff2089593518785fdc7d0bc0ffcd114b38f8966b10\": rpc error: code = NotFound desc = could not find container \"2ce337ce2726e42df63521ff2089593518785fdc7d0bc0ffcd114b38f8966b10\": container with ID starting with 2ce337ce2726e42df63521ff2089593518785fdc7d0bc0ffcd114b38f8966b10 not found: ID does not exist"
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.802333 4685 scope.go:117] "RemoveContainer" containerID="e752342182b5e9b20834b5baf524907384a8365cc36be08ac75ac918cef53c0d"
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.821525 4685 scope.go:117] "RemoveContainer" containerID="3659d0243c876cdcac8cad157f8bd7e3a795e9778cce8e1d98f1518bbdffcc77"
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.839422 4685 scope.go:117] "RemoveContainer" containerID="7c8b0d137592613182f0105ce97152903db18daa84807d3c6dcd9c1d8be01589"
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.847700 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e68fe67b-133d-4474-8f6d-a781bca954d7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e68fe67b-133d-4474-8f6d-a781bca954d7" (UID: "e68fe67b-133d-4474-8f6d-a781bca954d7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.867023 4685 scope.go:117] "RemoveContainer" containerID="e752342182b5e9b20834b5baf524907384a8365cc36be08ac75ac918cef53c0d"
Jan 28 12:31:23 crc kubenswrapper[4685]: E0128 12:31:23.867377 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e752342182b5e9b20834b5baf524907384a8365cc36be08ac75ac918cef53c0d\": container with ID starting with e752342182b5e9b20834b5baf524907384a8365cc36be08ac75ac918cef53c0d not found: ID does not exist" containerID="e752342182b5e9b20834b5baf524907384a8365cc36be08ac75ac918cef53c0d"
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.867468 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e752342182b5e9b20834b5baf524907384a8365cc36be08ac75ac918cef53c0d"} err="failed to get container status \"e752342182b5e9b20834b5baf524907384a8365cc36be08ac75ac918cef53c0d\": rpc error: code = NotFound desc = could not find container \"e752342182b5e9b20834b5baf524907384a8365cc36be08ac75ac918cef53c0d\": container with ID starting with e752342182b5e9b20834b5baf524907384a8365cc36be08ac75ac918cef53c0d not found: ID does not exist"
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.867545 4685 scope.go:117] "RemoveContainer" containerID="3659d0243c876cdcac8cad157f8bd7e3a795e9778cce8e1d98f1518bbdffcc77"
Jan 28 12:31:23 crc kubenswrapper[4685]: E0128 12:31:23.868064 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3659d0243c876cdcac8cad157f8bd7e3a795e9778cce8e1d98f1518bbdffcc77\": container with ID starting with 3659d0243c876cdcac8cad157f8bd7e3a795e9778cce8e1d98f1518bbdffcc77 not found: ID does not exist" containerID="3659d0243c876cdcac8cad157f8bd7e3a795e9778cce8e1d98f1518bbdffcc77"
Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.868146 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3659d0243c876cdcac8cad157f8bd7e3a795e9778cce8e1d98f1518bbdffcc77"} err="failed to get container status \"3659d0243c876cdcac8cad157f8bd7e3a795e9778cce8e1d98f1518bbdffcc77\": rpc error: code = NotFound desc = could not find container \"3659d0243c876cdcac8cad157f8bd7e3a795e9778cce8e1d98f1518bbdffcc77\": container with ID starting with 3659d0243c876cdcac8cad157f8bd7e3a795e9778cce8e1d98f1518bbdffcc77 not found: ID does not exist"
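The "RemoveContainer" / "ContainerStatus from runtime service failed ... NotFound" / "DeleteContainer returned error" triplets here are benign: each container was already removed by an earlier pass, so the follow-up status lookup can only fail with NotFound, which the deletor logs and moves past. Cleanup code typically treats gRPC NotFound as success so repeated passes stay idempotent; a sketch with a hypothetical remove function (not the kubelet's pod_container_deletor):

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // cleanupContainer removes a container, treating "already gone" as
    // success so that repeated cleanup passes (as in the log above) are
    // idempotent rather than erroring out.
    func cleanupContainer(remove func(id string) error, id string) error {
        err := remove(id)
        if err == nil || status.Code(err) == codes.NotFound {
            return nil // gone either way: nothing left to do
        }
        return err
    }

    func main() {
        gone := func(id string) error {
            return status.Error(codes.NotFound, "could not find container "+id)
        }
        fmt.Println(cleanupContainer(gone, "e930805a53fa")) // <nil>: NotFound tolerated
    }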
containerID={"Type":"cri-o","ID":"3659d0243c876cdcac8cad157f8bd7e3a795e9778cce8e1d98f1518bbdffcc77"} err="failed to get container status \"3659d0243c876cdcac8cad157f8bd7e3a795e9778cce8e1d98f1518bbdffcc77\": rpc error: code = NotFound desc = could not find container \"3659d0243c876cdcac8cad157f8bd7e3a795e9778cce8e1d98f1518bbdffcc77\": container with ID starting with 3659d0243c876cdcac8cad157f8bd7e3a795e9778cce8e1d98f1518bbdffcc77 not found: ID does not exist" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.868227 4685 scope.go:117] "RemoveContainer" containerID="7c8b0d137592613182f0105ce97152903db18daa84807d3c6dcd9c1d8be01589" Jan 28 12:31:23 crc kubenswrapper[4685]: E0128 12:31:23.868719 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c8b0d137592613182f0105ce97152903db18daa84807d3c6dcd9c1d8be01589\": container with ID starting with 7c8b0d137592613182f0105ce97152903db18daa84807d3c6dcd9c1d8be01589 not found: ID does not exist" containerID="7c8b0d137592613182f0105ce97152903db18daa84807d3c6dcd9c1d8be01589" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.868808 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c8b0d137592613182f0105ce97152903db18daa84807d3c6dcd9c1d8be01589"} err="failed to get container status \"7c8b0d137592613182f0105ce97152903db18daa84807d3c6dcd9c1d8be01589\": rpc error: code = NotFound desc = could not find container \"7c8b0d137592613182f0105ce97152903db18daa84807d3c6dcd9c1d8be01589\": container with ID starting with 7c8b0d137592613182f0105ce97152903db18daa84807d3c6dcd9c1d8be01589 not found: ID does not exist" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.868886 4685 scope.go:117] "RemoveContainer" containerID="e930805a53fabf004af5c8560f837c99b859d572f1018add50bd973e93fb06f1" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.880632 4685 scope.go:117] "RemoveContainer" containerID="7de13053aa8d167e65054867c6ad6d63a0a169e09c0a1abe3c97432b3cf90931" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.895318 4685 scope.go:117] "RemoveContainer" containerID="e930805a53fabf004af5c8560f837c99b859d572f1018add50bd973e93fb06f1" Jan 28 12:31:23 crc kubenswrapper[4685]: E0128 12:31:23.895748 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e930805a53fabf004af5c8560f837c99b859d572f1018add50bd973e93fb06f1\": container with ID starting with e930805a53fabf004af5c8560f837c99b859d572f1018add50bd973e93fb06f1 not found: ID does not exist" containerID="e930805a53fabf004af5c8560f837c99b859d572f1018add50bd973e93fb06f1" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.895851 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e930805a53fabf004af5c8560f837c99b859d572f1018add50bd973e93fb06f1"} err="failed to get container status \"e930805a53fabf004af5c8560f837c99b859d572f1018add50bd973e93fb06f1\": rpc error: code = NotFound desc = could not find container \"e930805a53fabf004af5c8560f837c99b859d572f1018add50bd973e93fb06f1\": container with ID starting with e930805a53fabf004af5c8560f837c99b859d572f1018add50bd973e93fb06f1 not found: ID does not exist" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.895932 4685 scope.go:117] "RemoveContainer" containerID="7de13053aa8d167e65054867c6ad6d63a0a169e09c0a1abe3c97432b3cf90931" Jan 28 12:31:23 crc kubenswrapper[4685]: E0128 12:31:23.896272 4685 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7de13053aa8d167e65054867c6ad6d63a0a169e09c0a1abe3c97432b3cf90931\": container with ID starting with 7de13053aa8d167e65054867c6ad6d63a0a169e09c0a1abe3c97432b3cf90931 not found: ID does not exist" containerID="7de13053aa8d167e65054867c6ad6d63a0a169e09c0a1abe3c97432b3cf90931" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.896367 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7de13053aa8d167e65054867c6ad6d63a0a169e09c0a1abe3c97432b3cf90931"} err="failed to get container status \"7de13053aa8d167e65054867c6ad6d63a0a169e09c0a1abe3c97432b3cf90931\": rpc error: code = NotFound desc = could not find container \"7de13053aa8d167e65054867c6ad6d63a0a169e09c0a1abe3c97432b3cf90931\": container with ID starting with 7de13053aa8d167e65054867c6ad6d63a0a169e09c0a1abe3c97432b3cf90931 not found: ID does not exist" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.896457 4685 scope.go:117] "RemoveContainer" containerID="a98be9e5af6900c610be2904f219404b02c4c12e10e4ed3af243b289ce6da76b" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.898094 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e68fe67b-133d-4474-8f6d-a781bca954d7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.921864 4685 scope.go:117] "RemoveContainer" containerID="c107caeb14757a9e2f196dad259efa8c46aa357e2d51892411fb356c2bfabb12" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.938410 4685 scope.go:117] "RemoveContainer" containerID="2559962c18c92ed9826fb516da83471266e9347e292f2e936859d5d88d5cde74" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.992697 4685 scope.go:117] "RemoveContainer" containerID="a98be9e5af6900c610be2904f219404b02c4c12e10e4ed3af243b289ce6da76b" Jan 28 12:31:23 crc kubenswrapper[4685]: E0128 12:31:23.993177 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a98be9e5af6900c610be2904f219404b02c4c12e10e4ed3af243b289ce6da76b\": container with ID starting with a98be9e5af6900c610be2904f219404b02c4c12e10e4ed3af243b289ce6da76b not found: ID does not exist" containerID="a98be9e5af6900c610be2904f219404b02c4c12e10e4ed3af243b289ce6da76b" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.993287 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a98be9e5af6900c610be2904f219404b02c4c12e10e4ed3af243b289ce6da76b"} err="failed to get container status \"a98be9e5af6900c610be2904f219404b02c4c12e10e4ed3af243b289ce6da76b\": rpc error: code = NotFound desc = could not find container \"a98be9e5af6900c610be2904f219404b02c4c12e10e4ed3af243b289ce6da76b\": container with ID starting with a98be9e5af6900c610be2904f219404b02c4c12e10e4ed3af243b289ce6da76b not found: ID does not exist" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.993383 4685 scope.go:117] "RemoveContainer" containerID="c107caeb14757a9e2f196dad259efa8c46aa357e2d51892411fb356c2bfabb12" Jan 28 12:31:23 crc kubenswrapper[4685]: E0128 12:31:23.994012 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c107caeb14757a9e2f196dad259efa8c46aa357e2d51892411fb356c2bfabb12\": container with ID starting with 
c107caeb14757a9e2f196dad259efa8c46aa357e2d51892411fb356c2bfabb12 not found: ID does not exist" containerID="c107caeb14757a9e2f196dad259efa8c46aa357e2d51892411fb356c2bfabb12" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.994045 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c107caeb14757a9e2f196dad259efa8c46aa357e2d51892411fb356c2bfabb12"} err="failed to get container status \"c107caeb14757a9e2f196dad259efa8c46aa357e2d51892411fb356c2bfabb12\": rpc error: code = NotFound desc = could not find container \"c107caeb14757a9e2f196dad259efa8c46aa357e2d51892411fb356c2bfabb12\": container with ID starting with c107caeb14757a9e2f196dad259efa8c46aa357e2d51892411fb356c2bfabb12 not found: ID does not exist" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.994070 4685 scope.go:117] "RemoveContainer" containerID="2559962c18c92ed9826fb516da83471266e9347e292f2e936859d5d88d5cde74" Jan 28 12:31:23 crc kubenswrapper[4685]: E0128 12:31:23.994342 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2559962c18c92ed9826fb516da83471266e9347e292f2e936859d5d88d5cde74\": container with ID starting with 2559962c18c92ed9826fb516da83471266e9347e292f2e936859d5d88d5cde74 not found: ID does not exist" containerID="2559962c18c92ed9826fb516da83471266e9347e292f2e936859d5d88d5cde74" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.994365 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2559962c18c92ed9826fb516da83471266e9347e292f2e936859d5d88d5cde74"} err="failed to get container status \"2559962c18c92ed9826fb516da83471266e9347e292f2e936859d5d88d5cde74\": rpc error: code = NotFound desc = could not find container \"2559962c18c92ed9826fb516da83471266e9347e292f2e936859d5d88d5cde74\": container with ID starting with 2559962c18c92ed9826fb516da83471266e9347e292f2e936859d5d88d5cde74 not found: ID does not exist" Jan 28 12:31:23 crc kubenswrapper[4685]: I0128 12:31:23.994381 4685 scope.go:117] "RemoveContainer" containerID="6518f209e38815a3951d44c5d8f748037065a21fc8667d52d2610a6024a15815" Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.008613 4685 scope.go:117] "RemoveContainer" containerID="e8a3c577e92aeb5212e6c1871671df58751247d3d7a016f2c8acb052e3d6b182" Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.036080 4685 scope.go:117] "RemoveContainer" containerID="fb9f3f9ce72e59a48c65798902e12dc57ccf3e99e09deed7fe83657bd673634f" Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.054823 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8cf72"] Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.057946 4685 scope.go:117] "RemoveContainer" containerID="6518f209e38815a3951d44c5d8f748037065a21fc8667d52d2610a6024a15815" Jan 28 12:31:24 crc kubenswrapper[4685]: E0128 12:31:24.058676 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6518f209e38815a3951d44c5d8f748037065a21fc8667d52d2610a6024a15815\": container with ID starting with 6518f209e38815a3951d44c5d8f748037065a21fc8667d52d2610a6024a15815 not found: ID does not exist" containerID="6518f209e38815a3951d44c5d8f748037065a21fc8667d52d2610a6024a15815" Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.058800 4685 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"6518f209e38815a3951d44c5d8f748037065a21fc8667d52d2610a6024a15815"} err="failed to get container status \"6518f209e38815a3951d44c5d8f748037065a21fc8667d52d2610a6024a15815\": rpc error: code = NotFound desc = could not find container \"6518f209e38815a3951d44c5d8f748037065a21fc8667d52d2610a6024a15815\": container with ID starting with 6518f209e38815a3951d44c5d8f748037065a21fc8667d52d2610a6024a15815 not found: ID does not exist" Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.058886 4685 scope.go:117] "RemoveContainer" containerID="e8a3c577e92aeb5212e6c1871671df58751247d3d7a016f2c8acb052e3d6b182" Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.059015 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8cf72"] Jan 28 12:31:24 crc kubenswrapper[4685]: E0128 12:31:24.059220 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8a3c577e92aeb5212e6c1871671df58751247d3d7a016f2c8acb052e3d6b182\": container with ID starting with e8a3c577e92aeb5212e6c1871671df58751247d3d7a016f2c8acb052e3d6b182 not found: ID does not exist" containerID="e8a3c577e92aeb5212e6c1871671df58751247d3d7a016f2c8acb052e3d6b182" Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.059296 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8a3c577e92aeb5212e6c1871671df58751247d3d7a016f2c8acb052e3d6b182"} err="failed to get container status \"e8a3c577e92aeb5212e6c1871671df58751247d3d7a016f2c8acb052e3d6b182\": rpc error: code = NotFound desc = could not find container \"e8a3c577e92aeb5212e6c1871671df58751247d3d7a016f2c8acb052e3d6b182\": container with ID starting with e8a3c577e92aeb5212e6c1871671df58751247d3d7a016f2c8acb052e3d6b182 not found: ID does not exist" Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.059359 4685 scope.go:117] "RemoveContainer" containerID="fb9f3f9ce72e59a48c65798902e12dc57ccf3e99e09deed7fe83657bd673634f" Jan 28 12:31:24 crc kubenswrapper[4685]: E0128 12:31:24.060475 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb9f3f9ce72e59a48c65798902e12dc57ccf3e99e09deed7fe83657bd673634f\": container with ID starting with fb9f3f9ce72e59a48c65798902e12dc57ccf3e99e09deed7fe83657bd673634f not found: ID does not exist" containerID="fb9f3f9ce72e59a48c65798902e12dc57ccf3e99e09deed7fe83657bd673634f" Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.060566 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb9f3f9ce72e59a48c65798902e12dc57ccf3e99e09deed7fe83657bd673634f"} err="failed to get container status \"fb9f3f9ce72e59a48c65798902e12dc57ccf3e99e09deed7fe83657bd673634f\": rpc error: code = NotFound desc = could not find container \"fb9f3f9ce72e59a48c65798902e12dc57ccf3e99e09deed7fe83657bd673634f\": container with ID starting with fb9f3f9ce72e59a48c65798902e12dc57ccf3e99e09deed7fe83657bd673634f not found: ID does not exist" Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.063669 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9q8q2"] Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.068713 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9q8q2"] Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.086346 4685 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wl9lt"] Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.093981 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wl9lt"] Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.554017 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="424bdf45-4dcb-4b13-b68f-e55e115238bb" path="/var/lib/kubelet/pods/424bdf45-4dcb-4b13-b68f-e55e115238bb/volumes" Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.554906 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" path="/var/lib/kubelet/pods/68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45/volumes" Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.555531 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" path="/var/lib/kubelet/pods/8d93a170-5ad3-489b-b3be-7e3cc4201970/volumes" Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.556951 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" path="/var/lib/kubelet/pods/cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb/volumes" Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.557710 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" path="/var/lib/kubelet/pods/e68fe67b-133d-4474-8f6d-a781bca954d7/volumes" Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.752101 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-7qzcb" event={"ID":"e4297ea9-021a-467f-b78b-89ba6ae5a6b1","Type":"ContainerStarted","Data":"0d95977846cba874246d6a0bfe4899f8d065e62a1c82b07a2d81c30271abfdfa"} Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.752153 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-7qzcb" event={"ID":"e4297ea9-021a-467f-b78b-89ba6ae5a6b1","Type":"ContainerStarted","Data":"d6f5852ea6f625bb0744f3ef55e8ccad04bc167062fbccc647df17f476d51ca7"} Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.752331 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-7qzcb" Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.758406 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-7qzcb" Jan 28 12:31:24 crc kubenswrapper[4685]: I0128 12:31:24.792672 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-7qzcb" podStartSLOduration=2.792649176 podStartE2EDuration="2.792649176s" podCreationTimestamp="2026-01-28 12:31:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:31:24.767718087 +0000 UTC m=+635.855131922" watchObservedRunningTime="2026-01-28 12:31:24.792649176 +0000 UTC m=+635.880063011" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.683789 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vf9sl"] Jan 28 12:31:25 crc kubenswrapper[4685]: E0128 12:31:25.684087 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerName="marketplace-operator" Jan 28 12:31:25 crc 
kubenswrapper[4685]: I0128 12:31:25.684113 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerName="marketplace-operator" Jan 28 12:31:25 crc kubenswrapper[4685]: E0128 12:31:25.684131 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="424bdf45-4dcb-4b13-b68f-e55e115238bb" containerName="extract-content" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.684143 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="424bdf45-4dcb-4b13-b68f-e55e115238bb" containerName="extract-content" Jan 28 12:31:25 crc kubenswrapper[4685]: E0128 12:31:25.684162 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" containerName="registry-server" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.684192 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" containerName="registry-server" Jan 28 12:31:25 crc kubenswrapper[4685]: E0128 12:31:25.684209 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" containerName="registry-server" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.684220 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" containerName="registry-server" Jan 28 12:31:25 crc kubenswrapper[4685]: E0128 12:31:25.684235 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" containerName="extract-utilities" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.684247 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" containerName="extract-utilities" Jan 28 12:31:25 crc kubenswrapper[4685]: E0128 12:31:25.684262 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" containerName="extract-content" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.684272 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" containerName="extract-content" Jan 28 12:31:25 crc kubenswrapper[4685]: E0128 12:31:25.684297 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" containerName="extract-utilities" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.684350 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" containerName="extract-utilities" Jan 28 12:31:25 crc kubenswrapper[4685]: E0128 12:31:25.684366 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" containerName="extract-content" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.684376 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" containerName="extract-content" Jan 28 12:31:25 crc kubenswrapper[4685]: E0128 12:31:25.684424 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" containerName="registry-server" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.684437 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" containerName="registry-server" Jan 28 12:31:25 crc kubenswrapper[4685]: E0128 12:31:25.684451 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" 
containerName="marketplace-operator" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.684462 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerName="marketplace-operator" Jan 28 12:31:25 crc kubenswrapper[4685]: E0128 12:31:25.684475 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" containerName="extract-utilities" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.684512 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" containerName="extract-utilities" Jan 28 12:31:25 crc kubenswrapper[4685]: E0128 12:31:25.684528 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" containerName="extract-content" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.684538 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" containerName="extract-content" Jan 28 12:31:25 crc kubenswrapper[4685]: E0128 12:31:25.684554 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerName="marketplace-operator" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.684605 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerName="marketplace-operator" Jan 28 12:31:25 crc kubenswrapper[4685]: E0128 12:31:25.684619 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="424bdf45-4dcb-4b13-b68f-e55e115238bb" containerName="extract-utilities" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.684629 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="424bdf45-4dcb-4b13-b68f-e55e115238bb" containerName="extract-utilities" Jan 28 12:31:25 crc kubenswrapper[4685]: E0128 12:31:25.684640 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerName="marketplace-operator" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.684711 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerName="marketplace-operator" Jan 28 12:31:25 crc kubenswrapper[4685]: E0128 12:31:25.684728 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="424bdf45-4dcb-4b13-b68f-e55e115238bb" containerName="registry-server" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.684738 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="424bdf45-4dcb-4b13-b68f-e55e115238bb" containerName="registry-server" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.685077 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d93a170-5ad3-489b-b3be-7e3cc4201970" containerName="registry-server" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.685097 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdbb77b0-98bc-432c-a1ed-ea9e7a1b02cb" containerName="registry-server" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.685154 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerName="marketplace-operator" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.685214 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerName="marketplace-operator" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.685226 4685 
memory_manager.go:354] "RemoveStaleState removing state" podUID="e68fe67b-133d-4474-8f6d-a781bca954d7" containerName="registry-server" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.685239 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="424bdf45-4dcb-4b13-b68f-e55e115238bb" containerName="registry-server" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.685255 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerName="marketplace-operator" Jan 28 12:31:25 crc kubenswrapper[4685]: E0128 12:31:25.685568 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerName="marketplace-operator" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.685591 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerName="marketplace-operator" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.685850 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerName="marketplace-operator" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.685874 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="68d55ba6-d3eb-4ec0-9deb-40fc8a6c2f45" containerName="marketplace-operator" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.687568 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vf9sl" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.690152 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jkngf"] Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.691047 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.691345 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jkngf" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.693491 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.700166 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vf9sl"] Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.704435 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jkngf"] Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.820705 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94dd5e0c-ac23-490c-a1da-3e08ca35ecda-utilities\") pod \"redhat-marketplace-vf9sl\" (UID: \"94dd5e0c-ac23-490c-a1da-3e08ca35ecda\") " pod="openshift-marketplace/redhat-marketplace-vf9sl" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.820747 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94dd5e0c-ac23-490c-a1da-3e08ca35ecda-catalog-content\") pod \"redhat-marketplace-vf9sl\" (UID: \"94dd5e0c-ac23-490c-a1da-3e08ca35ecda\") " pod="openshift-marketplace/redhat-marketplace-vf9sl" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.820802 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmbw9\" (UniqueName: \"kubernetes.io/projected/94dd5e0c-ac23-490c-a1da-3e08ca35ecda-kube-api-access-dmbw9\") pod \"redhat-marketplace-vf9sl\" (UID: \"94dd5e0c-ac23-490c-a1da-3e08ca35ecda\") " pod="openshift-marketplace/redhat-marketplace-vf9sl" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.820832 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whj47\" (UniqueName: \"kubernetes.io/projected/04f395cb-91b3-4fc2-a434-1db8e1a2d32f-kube-api-access-whj47\") pod \"community-operators-jkngf\" (UID: \"04f395cb-91b3-4fc2-a434-1db8e1a2d32f\") " pod="openshift-marketplace/community-operators-jkngf" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.820857 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04f395cb-91b3-4fc2-a434-1db8e1a2d32f-utilities\") pod \"community-operators-jkngf\" (UID: \"04f395cb-91b3-4fc2-a434-1db8e1a2d32f\") " pod="openshift-marketplace/community-operators-jkngf" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.820877 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04f395cb-91b3-4fc2-a434-1db8e1a2d32f-catalog-content\") pod \"community-operators-jkngf\" (UID: \"04f395cb-91b3-4fc2-a434-1db8e1a2d32f\") " pod="openshift-marketplace/community-operators-jkngf" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.922385 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94dd5e0c-ac23-490c-a1da-3e08ca35ecda-utilities\") pod \"redhat-marketplace-vf9sl\" (UID: \"94dd5e0c-ac23-490c-a1da-3e08ca35ecda\") " pod="openshift-marketplace/redhat-marketplace-vf9sl" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.922452 
4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94dd5e0c-ac23-490c-a1da-3e08ca35ecda-catalog-content\") pod \"redhat-marketplace-vf9sl\" (UID: \"94dd5e0c-ac23-490c-a1da-3e08ca35ecda\") " pod="openshift-marketplace/redhat-marketplace-vf9sl" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.922542 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmbw9\" (UniqueName: \"kubernetes.io/projected/94dd5e0c-ac23-490c-a1da-3e08ca35ecda-kube-api-access-dmbw9\") pod \"redhat-marketplace-vf9sl\" (UID: \"94dd5e0c-ac23-490c-a1da-3e08ca35ecda\") " pod="openshift-marketplace/redhat-marketplace-vf9sl" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.922623 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whj47\" (UniqueName: \"kubernetes.io/projected/04f395cb-91b3-4fc2-a434-1db8e1a2d32f-kube-api-access-whj47\") pod \"community-operators-jkngf\" (UID: \"04f395cb-91b3-4fc2-a434-1db8e1a2d32f\") " pod="openshift-marketplace/community-operators-jkngf" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.922688 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04f395cb-91b3-4fc2-a434-1db8e1a2d32f-utilities\") pod \"community-operators-jkngf\" (UID: \"04f395cb-91b3-4fc2-a434-1db8e1a2d32f\") " pod="openshift-marketplace/community-operators-jkngf" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.922749 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04f395cb-91b3-4fc2-a434-1db8e1a2d32f-catalog-content\") pod \"community-operators-jkngf\" (UID: \"04f395cb-91b3-4fc2-a434-1db8e1a2d32f\") " pod="openshift-marketplace/community-operators-jkngf" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.922920 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94dd5e0c-ac23-490c-a1da-3e08ca35ecda-utilities\") pod \"redhat-marketplace-vf9sl\" (UID: \"94dd5e0c-ac23-490c-a1da-3e08ca35ecda\") " pod="openshift-marketplace/redhat-marketplace-vf9sl" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.923254 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94dd5e0c-ac23-490c-a1da-3e08ca35ecda-catalog-content\") pod \"redhat-marketplace-vf9sl\" (UID: \"94dd5e0c-ac23-490c-a1da-3e08ca35ecda\") " pod="openshift-marketplace/redhat-marketplace-vf9sl" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.923323 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04f395cb-91b3-4fc2-a434-1db8e1a2d32f-catalog-content\") pod \"community-operators-jkngf\" (UID: \"04f395cb-91b3-4fc2-a434-1db8e1a2d32f\") " pod="openshift-marketplace/community-operators-jkngf" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.924124 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04f395cb-91b3-4fc2-a434-1db8e1a2d32f-utilities\") pod \"community-operators-jkngf\" (UID: \"04f395cb-91b3-4fc2-a434-1db8e1a2d32f\") " pod="openshift-marketplace/community-operators-jkngf" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.946498 4685 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-dmbw9\" (UniqueName: \"kubernetes.io/projected/94dd5e0c-ac23-490c-a1da-3e08ca35ecda-kube-api-access-dmbw9\") pod \"redhat-marketplace-vf9sl\" (UID: \"94dd5e0c-ac23-490c-a1da-3e08ca35ecda\") " pod="openshift-marketplace/redhat-marketplace-vf9sl" Jan 28 12:31:25 crc kubenswrapper[4685]: I0128 12:31:25.953532 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whj47\" (UniqueName: \"kubernetes.io/projected/04f395cb-91b3-4fc2-a434-1db8e1a2d32f-kube-api-access-whj47\") pod \"community-operators-jkngf\" (UID: \"04f395cb-91b3-4fc2-a434-1db8e1a2d32f\") " pod="openshift-marketplace/community-operators-jkngf" Jan 28 12:31:26 crc kubenswrapper[4685]: I0128 12:31:26.020044 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vf9sl" Jan 28 12:31:26 crc kubenswrapper[4685]: I0128 12:31:26.026944 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jkngf" Jan 28 12:31:26 crc kubenswrapper[4685]: I0128 12:31:26.427113 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vf9sl"] Jan 28 12:31:26 crc kubenswrapper[4685]: I0128 12:31:26.501879 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jkngf"] Jan 28 12:31:26 crc kubenswrapper[4685]: I0128 12:31:26.783868 4685 generic.go:334] "Generic (PLEG): container finished" podID="04f395cb-91b3-4fc2-a434-1db8e1a2d32f" containerID="f96bebb6667c33a18f0ffe43e688e4a93cc2e61ac1611966bdd73c6fdb6648d3" exitCode=0 Jan 28 12:31:26 crc kubenswrapper[4685]: I0128 12:31:26.783951 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkngf" event={"ID":"04f395cb-91b3-4fc2-a434-1db8e1a2d32f","Type":"ContainerDied","Data":"f96bebb6667c33a18f0ffe43e688e4a93cc2e61ac1611966bdd73c6fdb6648d3"} Jan 28 12:31:26 crc kubenswrapper[4685]: I0128 12:31:26.783974 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkngf" event={"ID":"04f395cb-91b3-4fc2-a434-1db8e1a2d32f","Type":"ContainerStarted","Data":"eedc03d2926076b10e332aeab815934b4ab8782f6992871b1c2cff9124411359"} Jan 28 12:31:26 crc kubenswrapper[4685]: I0128 12:31:26.785975 4685 generic.go:334] "Generic (PLEG): container finished" podID="94dd5e0c-ac23-490c-a1da-3e08ca35ecda" containerID="8b88cfa2af28a492c0d0e66fba7611fc8fb490252327cbefb1093a1d26b3fb1f" exitCode=0 Jan 28 12:31:26 crc kubenswrapper[4685]: I0128 12:31:26.786019 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vf9sl" event={"ID":"94dd5e0c-ac23-490c-a1da-3e08ca35ecda","Type":"ContainerDied","Data":"8b88cfa2af28a492c0d0e66fba7611fc8fb490252327cbefb1093a1d26b3fb1f"} Jan 28 12:31:26 crc kubenswrapper[4685]: I0128 12:31:26.786076 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vf9sl" event={"ID":"94dd5e0c-ac23-490c-a1da-3e08ca35ecda","Type":"ContainerStarted","Data":"4d4618fd551fb4525fbb014d25d4d1a4c4a06ebaf5d4e52cadedc56991953957"} Jan 28 12:31:26 crc kubenswrapper[4685]: I0128 12:31:26.787095 4685 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.069727 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv 
container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.069802 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.207411 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kh692"] Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.209657 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kh692" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.211774 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.218072 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kh692"] Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.242905 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lnhp2\" (UniqueName: \"kubernetes.io/projected/d5738525-4c41-42c5-85d3-afce7539f7c8-kube-api-access-lnhp2\") pod \"certified-operators-kh692\" (UID: \"d5738525-4c41-42c5-85d3-afce7539f7c8\") " pod="openshift-marketplace/certified-operators-kh692" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.242962 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5738525-4c41-42c5-85d3-afce7539f7c8-catalog-content\") pod \"certified-operators-kh692\" (UID: \"d5738525-4c41-42c5-85d3-afce7539f7c8\") " pod="openshift-marketplace/certified-operators-kh692" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.243018 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5738525-4c41-42c5-85d3-afce7539f7c8-utilities\") pod \"certified-operators-kh692\" (UID: \"d5738525-4c41-42c5-85d3-afce7539f7c8\") " pod="openshift-marketplace/certified-operators-kh692" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.344510 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5738525-4c41-42c5-85d3-afce7539f7c8-catalog-content\") pod \"certified-operators-kh692\" (UID: \"d5738525-4c41-42c5-85d3-afce7539f7c8\") " pod="openshift-marketplace/certified-operators-kh692" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.344611 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5738525-4c41-42c5-85d3-afce7539f7c8-utilities\") pod \"certified-operators-kh692\" (UID: \"d5738525-4c41-42c5-85d3-afce7539f7c8\") " pod="openshift-marketplace/certified-operators-kh692" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.344646 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lnhp2\" 
(UniqueName: \"kubernetes.io/projected/d5738525-4c41-42c5-85d3-afce7539f7c8-kube-api-access-lnhp2\") pod \"certified-operators-kh692\" (UID: \"d5738525-4c41-42c5-85d3-afce7539f7c8\") " pod="openshift-marketplace/certified-operators-kh692" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.345024 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5738525-4c41-42c5-85d3-afce7539f7c8-catalog-content\") pod \"certified-operators-kh692\" (UID: \"d5738525-4c41-42c5-85d3-afce7539f7c8\") " pod="openshift-marketplace/certified-operators-kh692" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.345433 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5738525-4c41-42c5-85d3-afce7539f7c8-utilities\") pod \"certified-operators-kh692\" (UID: \"d5738525-4c41-42c5-85d3-afce7539f7c8\") " pod="openshift-marketplace/certified-operators-kh692" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.365973 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lnhp2\" (UniqueName: \"kubernetes.io/projected/d5738525-4c41-42c5-85d3-afce7539f7c8-kube-api-access-lnhp2\") pod \"certified-operators-kh692\" (UID: \"d5738525-4c41-42c5-85d3-afce7539f7c8\") " pod="openshift-marketplace/certified-operators-kh692" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.527386 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kh692" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.792199 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkngf" event={"ID":"04f395cb-91b3-4fc2-a434-1db8e1a2d32f","Type":"ContainerStarted","Data":"7829f32e8c5c4044f07ca1f1955574525210e4b3e28cc1904eb41160658fd5b1"} Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.795025 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vf9sl" event={"ID":"94dd5e0c-ac23-490c-a1da-3e08ca35ecda","Type":"ContainerStarted","Data":"24a3ca13ff008c7d06e0a4d5171b984ad69be49dc9a56f1832351879dcb77e68"} Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.813502 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nfmfw"] Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.816222 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nfmfw" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.819695 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.822798 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nfmfw"] Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.850688 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14ffad50-547e-4c2a-b72c-d6a87b50e746-utilities\") pod \"redhat-operators-nfmfw\" (UID: \"14ffad50-547e-4c2a-b72c-d6a87b50e746\") " pod="openshift-marketplace/redhat-operators-nfmfw" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.850741 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z96zq\" (UniqueName: \"kubernetes.io/projected/14ffad50-547e-4c2a-b72c-d6a87b50e746-kube-api-access-z96zq\") pod \"redhat-operators-nfmfw\" (UID: \"14ffad50-547e-4c2a-b72c-d6a87b50e746\") " pod="openshift-marketplace/redhat-operators-nfmfw" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.850766 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14ffad50-547e-4c2a-b72c-d6a87b50e746-catalog-content\") pod \"redhat-operators-nfmfw\" (UID: \"14ffad50-547e-4c2a-b72c-d6a87b50e746\") " pod="openshift-marketplace/redhat-operators-nfmfw" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.942575 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kh692"] Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.951510 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z96zq\" (UniqueName: \"kubernetes.io/projected/14ffad50-547e-4c2a-b72c-d6a87b50e746-kube-api-access-z96zq\") pod \"redhat-operators-nfmfw\" (UID: \"14ffad50-547e-4c2a-b72c-d6a87b50e746\") " pod="openshift-marketplace/redhat-operators-nfmfw" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.951568 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14ffad50-547e-4c2a-b72c-d6a87b50e746-catalog-content\") pod \"redhat-operators-nfmfw\" (UID: \"14ffad50-547e-4c2a-b72c-d6a87b50e746\") " pod="openshift-marketplace/redhat-operators-nfmfw" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.951637 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14ffad50-547e-4c2a-b72c-d6a87b50e746-utilities\") pod \"redhat-operators-nfmfw\" (UID: \"14ffad50-547e-4c2a-b72c-d6a87b50e746\") " pod="openshift-marketplace/redhat-operators-nfmfw" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.952288 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14ffad50-547e-4c2a-b72c-d6a87b50e746-utilities\") pod \"redhat-operators-nfmfw\" (UID: \"14ffad50-547e-4c2a-b72c-d6a87b50e746\") " pod="openshift-marketplace/redhat-operators-nfmfw" Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.952402 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/14ffad50-547e-4c2a-b72c-d6a87b50e746-catalog-content\") pod \"redhat-operators-nfmfw\" (UID: \"14ffad50-547e-4c2a-b72c-d6a87b50e746\") " pod="openshift-marketplace/redhat-operators-nfmfw" Jan 28 12:31:27 crc kubenswrapper[4685]: W0128 12:31:27.960001 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd5738525_4c41_42c5_85d3_afce7539f7c8.slice/crio-e8b2cdfb3944f40b025595831a566b53a999c1f0dcfe1da9eb67e5ce87f91ce9 WatchSource:0}: Error finding container e8b2cdfb3944f40b025595831a566b53a999c1f0dcfe1da9eb67e5ce87f91ce9: Status 404 returned error can't find the container with id e8b2cdfb3944f40b025595831a566b53a999c1f0dcfe1da9eb67e5ce87f91ce9 Jan 28 12:31:27 crc kubenswrapper[4685]: I0128 12:31:27.973475 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z96zq\" (UniqueName: \"kubernetes.io/projected/14ffad50-547e-4c2a-b72c-d6a87b50e746-kube-api-access-z96zq\") pod \"redhat-operators-nfmfw\" (UID: \"14ffad50-547e-4c2a-b72c-d6a87b50e746\") " pod="openshift-marketplace/redhat-operators-nfmfw" Jan 28 12:31:28 crc kubenswrapper[4685]: I0128 12:31:28.150546 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nfmfw" Jan 28 12:31:28 crc kubenswrapper[4685]: I0128 12:31:28.542460 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nfmfw"] Jan 28 12:31:28 crc kubenswrapper[4685]: W0128 12:31:28.551656 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod14ffad50_547e_4c2a_b72c_d6a87b50e746.slice/crio-ec02327c7bc9e9ac7369a31c424a0988c5836c201f1008989f54889605afcc2c WatchSource:0}: Error finding container ec02327c7bc9e9ac7369a31c424a0988c5836c201f1008989f54889605afcc2c: Status 404 returned error can't find the container with id ec02327c7bc9e9ac7369a31c424a0988c5836c201f1008989f54889605afcc2c Jan 28 12:31:28 crc kubenswrapper[4685]: I0128 12:31:28.806874 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nfmfw" event={"ID":"14ffad50-547e-4c2a-b72c-d6a87b50e746","Type":"ContainerStarted","Data":"ec02327c7bc9e9ac7369a31c424a0988c5836c201f1008989f54889605afcc2c"} Jan 28 12:31:28 crc kubenswrapper[4685]: I0128 12:31:28.808531 4685 generic.go:334] "Generic (PLEG): container finished" podID="d5738525-4c41-42c5-85d3-afce7539f7c8" containerID="b849569ed623081ccefe47b0be8910320feea45e1ec6038aa1703c08fc24ff41" exitCode=0 Jan 28 12:31:28 crc kubenswrapper[4685]: I0128 12:31:28.808579 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kh692" event={"ID":"d5738525-4c41-42c5-85d3-afce7539f7c8","Type":"ContainerDied","Data":"b849569ed623081ccefe47b0be8910320feea45e1ec6038aa1703c08fc24ff41"} Jan 28 12:31:28 crc kubenswrapper[4685]: I0128 12:31:28.808620 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kh692" event={"ID":"d5738525-4c41-42c5-85d3-afce7539f7c8","Type":"ContainerStarted","Data":"e8b2cdfb3944f40b025595831a566b53a999c1f0dcfe1da9eb67e5ce87f91ce9"} Jan 28 12:31:28 crc kubenswrapper[4685]: I0128 12:31:28.813318 4685 generic.go:334] "Generic (PLEG): container finished" podID="94dd5e0c-ac23-490c-a1da-3e08ca35ecda" containerID="24a3ca13ff008c7d06e0a4d5171b984ad69be49dc9a56f1832351879dcb77e68" exitCode=0 Jan 28 12:31:28 crc 
kubenswrapper[4685]: I0128 12:31:28.813420 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vf9sl" event={"ID":"94dd5e0c-ac23-490c-a1da-3e08ca35ecda","Type":"ContainerDied","Data":"24a3ca13ff008c7d06e0a4d5171b984ad69be49dc9a56f1832351879dcb77e68"} Jan 28 12:31:28 crc kubenswrapper[4685]: I0128 12:31:28.818096 4685 generic.go:334] "Generic (PLEG): container finished" podID="04f395cb-91b3-4fc2-a434-1db8e1a2d32f" containerID="7829f32e8c5c4044f07ca1f1955574525210e4b3e28cc1904eb41160658fd5b1" exitCode=0 Jan 28 12:31:28 crc kubenswrapper[4685]: I0128 12:31:28.818145 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkngf" event={"ID":"04f395cb-91b3-4fc2-a434-1db8e1a2d32f","Type":"ContainerDied","Data":"7829f32e8c5c4044f07ca1f1955574525210e4b3e28cc1904eb41160658fd5b1"} Jan 28 12:31:29 crc kubenswrapper[4685]: I0128 12:31:29.824896 4685 generic.go:334] "Generic (PLEG): container finished" podID="14ffad50-547e-4c2a-b72c-d6a87b50e746" containerID="d25741f9f1c1cd28f65ff28e196a93874c328f3f2fd177c5212e710588f2dc3a" exitCode=0 Jan 28 12:31:29 crc kubenswrapper[4685]: I0128 12:31:29.824948 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nfmfw" event={"ID":"14ffad50-547e-4c2a-b72c-d6a87b50e746","Type":"ContainerDied","Data":"d25741f9f1c1cd28f65ff28e196a93874c328f3f2fd177c5212e710588f2dc3a"} Jan 28 12:31:30 crc kubenswrapper[4685]: I0128 12:31:30.832962 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vf9sl" event={"ID":"94dd5e0c-ac23-490c-a1da-3e08ca35ecda","Type":"ContainerStarted","Data":"4e5fcc12b1d9eec6464b7cc3fbfb8597bcffb897192f03f7d42517ed503e37cd"} Jan 28 12:31:30 crc kubenswrapper[4685]: I0128 12:31:30.836834 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkngf" event={"ID":"04f395cb-91b3-4fc2-a434-1db8e1a2d32f","Type":"ContainerStarted","Data":"624af537d2e5335475c036dda5b236f1315311866fcac1b703a11c6b9eea6152"} Jan 28 12:31:30 crc kubenswrapper[4685]: I0128 12:31:30.861766 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vf9sl" podStartSLOduration=3.918755042 podStartE2EDuration="6.861750497s" podCreationTimestamp="2026-01-28 12:31:24 +0000 UTC" firstStartedPulling="2026-01-28 12:31:26.790422362 +0000 UTC m=+637.877836197" lastFinishedPulling="2026-01-28 12:31:29.733417817 +0000 UTC m=+640.820831652" observedRunningTime="2026-01-28 12:31:30.855856089 +0000 UTC m=+641.943269944" watchObservedRunningTime="2026-01-28 12:31:30.861750497 +0000 UTC m=+641.949164332" Jan 28 12:31:30 crc kubenswrapper[4685]: I0128 12:31:30.892251 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jkngf" podStartSLOduration=2.499517485 podStartE2EDuration="5.892232673s" podCreationTimestamp="2026-01-28 12:31:25 +0000 UTC" firstStartedPulling="2026-01-28 12:31:26.786532512 +0000 UTC m=+637.873946377" lastFinishedPulling="2026-01-28 12:31:30.17924773 +0000 UTC m=+641.266661565" observedRunningTime="2026-01-28 12:31:30.888556448 +0000 UTC m=+641.975970293" watchObservedRunningTime="2026-01-28 12:31:30.892232673 +0000 UTC m=+641.979646508" Jan 28 12:31:31 crc kubenswrapper[4685]: I0128 12:31:31.843866 4685 generic.go:334] "Generic (PLEG): container finished" podID="d5738525-4c41-42c5-85d3-afce7539f7c8" 
containerID="fe61c7d2cd6be7ecdfd39bc6e86b64310d8cae8a18559cdd4e6cd3b2251d6890" exitCode=0 Jan 28 12:31:31 crc kubenswrapper[4685]: I0128 12:31:31.843957 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kh692" event={"ID":"d5738525-4c41-42c5-85d3-afce7539f7c8","Type":"ContainerDied","Data":"fe61c7d2cd6be7ecdfd39bc6e86b64310d8cae8a18559cdd4e6cd3b2251d6890"} Jan 28 12:31:32 crc kubenswrapper[4685]: I0128 12:31:32.853451 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nfmfw" event={"ID":"14ffad50-547e-4c2a-b72c-d6a87b50e746","Type":"ContainerStarted","Data":"dd232d9c8bc91d905471314993d443a669ece4e193f3ec0e768fa99f513f2e63"} Jan 28 12:31:33 crc kubenswrapper[4685]: I0128 12:31:33.861892 4685 generic.go:334] "Generic (PLEG): container finished" podID="14ffad50-547e-4c2a-b72c-d6a87b50e746" containerID="dd232d9c8bc91d905471314993d443a669ece4e193f3ec0e768fa99f513f2e63" exitCode=0 Jan 28 12:31:33 crc kubenswrapper[4685]: I0128 12:31:33.862231 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nfmfw" event={"ID":"14ffad50-547e-4c2a-b72c-d6a87b50e746","Type":"ContainerDied","Data":"dd232d9c8bc91d905471314993d443a669ece4e193f3ec0e768fa99f513f2e63"} Jan 28 12:31:34 crc kubenswrapper[4685]: I0128 12:31:34.869875 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kh692" event={"ID":"d5738525-4c41-42c5-85d3-afce7539f7c8","Type":"ContainerStarted","Data":"25d9e981176d3b9fe2ef60d850e0f5e5bc7cdf3c10ff362f205a7a18e2017c36"} Jan 28 12:31:34 crc kubenswrapper[4685]: I0128 12:31:34.889833 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-kh692" podStartSLOduration=2.9979523009999998 podStartE2EDuration="7.889817733s" podCreationTimestamp="2026-01-28 12:31:27 +0000 UTC" firstStartedPulling="2026-01-28 12:31:28.810039198 +0000 UTC m=+639.897453033" lastFinishedPulling="2026-01-28 12:31:33.70190464 +0000 UTC m=+644.789318465" observedRunningTime="2026-01-28 12:31:34.888815024 +0000 UTC m=+645.976228859" watchObservedRunningTime="2026-01-28 12:31:34.889817733 +0000 UTC m=+645.977231568" Jan 28 12:31:35 crc kubenswrapper[4685]: I0128 12:31:35.877125 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nfmfw" event={"ID":"14ffad50-547e-4c2a-b72c-d6a87b50e746","Type":"ContainerStarted","Data":"0d382aab3b6af6cbba9c45f8e909baf5ed3172a3cdca69be205e47ca3f68762c"} Jan 28 12:31:35 crc kubenswrapper[4685]: I0128 12:31:35.895029 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nfmfw" podStartSLOduration=3.636511127 podStartE2EDuration="8.894999884s" podCreationTimestamp="2026-01-28 12:31:27 +0000 UTC" firstStartedPulling="2026-01-28 12:31:29.826669245 +0000 UTC m=+640.914083080" lastFinishedPulling="2026-01-28 12:31:35.085158002 +0000 UTC m=+646.172571837" observedRunningTime="2026-01-28 12:31:35.891523505 +0000 UTC m=+646.978937350" watchObservedRunningTime="2026-01-28 12:31:35.894999884 +0000 UTC m=+646.982413729" Jan 28 12:31:36 crc kubenswrapper[4685]: I0128 12:31:36.020876 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vf9sl" Jan 28 12:31:36 crc kubenswrapper[4685]: I0128 12:31:36.021254 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-marketplace-vf9sl" Jan 28 12:31:36 crc kubenswrapper[4685]: I0128 12:31:36.027935 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jkngf" Jan 28 12:31:36 crc kubenswrapper[4685]: I0128 12:31:36.028243 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jkngf" Jan 28 12:31:36 crc kubenswrapper[4685]: I0128 12:31:36.062682 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vf9sl" Jan 28 12:31:36 crc kubenswrapper[4685]: I0128 12:31:36.070520 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jkngf" Jan 28 12:31:36 crc kubenswrapper[4685]: I0128 12:31:36.922712 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vf9sl" Jan 28 12:31:36 crc kubenswrapper[4685]: I0128 12:31:36.950774 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jkngf" Jan 28 12:31:37 crc kubenswrapper[4685]: I0128 12:31:37.528196 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-kh692" Jan 28 12:31:37 crc kubenswrapper[4685]: I0128 12:31:37.528252 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-kh692" Jan 28 12:31:37 crc kubenswrapper[4685]: I0128 12:31:37.570003 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-kh692" Jan 28 12:31:38 crc kubenswrapper[4685]: I0128 12:31:38.150945 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nfmfw" Jan 28 12:31:38 crc kubenswrapper[4685]: I0128 12:31:38.153765 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nfmfw" Jan 28 12:31:39 crc kubenswrapper[4685]: I0128 12:31:39.196616 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nfmfw" podUID="14ffad50-547e-4c2a-b72c-d6a87b50e746" containerName="registry-server" probeResult="failure" output=< Jan 28 12:31:39 crc kubenswrapper[4685]: timeout: failed to connect service ":50051" within 1s Jan 28 12:31:39 crc kubenswrapper[4685]: > Jan 28 12:31:47 crc kubenswrapper[4685]: I0128 12:31:47.570822 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kh692" Jan 28 12:31:48 crc kubenswrapper[4685]: I0128 12:31:48.349995 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nfmfw" Jan 28 12:31:48 crc kubenswrapper[4685]: I0128 12:31:48.393911 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nfmfw" Jan 28 12:31:57 crc kubenswrapper[4685]: I0128 12:31:57.069918 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:31:57 crc kubenswrapper[4685]: I0128 12:31:57.070299 4685 
prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:31:57 crc kubenswrapper[4685]: I0128 12:31:57.070349 4685 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:31:57 crc kubenswrapper[4685]: I0128 12:31:57.070985 4685 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b08beb29fe5b7d62b07eaeb5fd852ce2c7a3d0905ce5b3b6ba6586d8fc4e3107"} pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 12:31:57 crc kubenswrapper[4685]: I0128 12:31:57.071045 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" containerID="cri-o://b08beb29fe5b7d62b07eaeb5fd852ce2c7a3d0905ce5b3b6ba6586d8fc4e3107" gracePeriod=600 Jan 28 12:31:57 crc kubenswrapper[4685]: I0128 12:31:57.991642 4685 generic.go:334] "Generic (PLEG): container finished" podID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerID="b08beb29fe5b7d62b07eaeb5fd852ce2c7a3d0905ce5b3b6ba6586d8fc4e3107" exitCode=0 Jan 28 12:31:57 crc kubenswrapper[4685]: I0128 12:31:57.991755 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerDied","Data":"b08beb29fe5b7d62b07eaeb5fd852ce2c7a3d0905ce5b3b6ba6586d8fc4e3107"} Jan 28 12:31:57 crc kubenswrapper[4685]: I0128 12:31:57.992070 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerStarted","Data":"cf8410625584fb0c630118ee349784ba7de22b55afd9a004aa39caf181306381"} Jan 28 12:31:57 crc kubenswrapper[4685]: I0128 12:31:57.992097 4685 scope.go:117] "RemoveContainer" containerID="a525aa42b2fedd64e12200b250d522770d76d5b9f7da6fd1b15cf0f353da0c9d" Jan 28 12:33:19 crc kubenswrapper[4685]: I0128 12:33:19.674448 4685 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 28 12:33:57 crc kubenswrapper[4685]: I0128 12:33:57.069394 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:33:57 crc kubenswrapper[4685]: I0128 12:33:57.070059 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:34:27 crc kubenswrapper[4685]: I0128 12:34:27.069245 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv 
container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:34:27 crc kubenswrapper[4685]: I0128 12:34:27.069956 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:34:57 crc kubenswrapper[4685]: I0128 12:34:57.070590 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:34:57 crc kubenswrapper[4685]: I0128 12:34:57.071327 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:34:57 crc kubenswrapper[4685]: I0128 12:34:57.071395 4685 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:34:57 crc kubenswrapper[4685]: I0128 12:34:57.072126 4685 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cf8410625584fb0c630118ee349784ba7de22b55afd9a004aa39caf181306381"} pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 12:34:57 crc kubenswrapper[4685]: I0128 12:34:57.072239 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" containerID="cri-o://cf8410625584fb0c630118ee349784ba7de22b55afd9a004aa39caf181306381" gracePeriod=600 Jan 28 12:34:58 crc kubenswrapper[4685]: I0128 12:34:58.097302 4685 generic.go:334] "Generic (PLEG): container finished" podID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerID="cf8410625584fb0c630118ee349784ba7de22b55afd9a004aa39caf181306381" exitCode=0 Jan 28 12:34:58 crc kubenswrapper[4685]: I0128 12:34:58.100346 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerDied","Data":"cf8410625584fb0c630118ee349784ba7de22b55afd9a004aa39caf181306381"} Jan 28 12:34:58 crc kubenswrapper[4685]: I0128 12:34:58.101065 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerStarted","Data":"121e44d10a06506539fa644a7a14ed20205554003b9b8ca8281810f57af222bf"} Jan 28 12:34:58 crc kubenswrapper[4685]: I0128 12:34:58.101094 4685 scope.go:117] "RemoveContainer" containerID="b08beb29fe5b7d62b07eaeb5fd852ce2c7a3d0905ce5b3b6ba6586d8fc4e3107" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 
12:35:10.192632 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-94bvn"] Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.194273 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.213886 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-94bvn"] Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.302806 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9992\" (UniqueName: \"kubernetes.io/projected/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-kube-api-access-w9992\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.302885 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-trusted-ca\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.302916 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-registry-tls\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.302953 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-bound-sa-token\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.303006 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-installation-pull-secrets\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.303125 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.303153 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-ca-trust-extracted\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.303199 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-registry-certificates\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.323541 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.405110 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-trusted-ca\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.405209 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-registry-tls\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.405252 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-bound-sa-token\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.405305 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-installation-pull-secrets\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.405339 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-ca-trust-extracted\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.405368 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-registry-certificates\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.405395 4685 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-w9992\" (UniqueName: \"kubernetes.io/projected/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-kube-api-access-w9992\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.406414 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-ca-trust-extracted\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.406824 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-trusted-ca\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.407249 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-registry-certificates\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.412932 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-registry-tls\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.413374 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-installation-pull-secrets\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.423619 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-bound-sa-token\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.424139 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9992\" (UniqueName: \"kubernetes.io/projected/56b92c9c-7b96-4956-a902-e38c8a3f4cc7-kube-api-access-w9992\") pod \"image-registry-66df7c8f76-94bvn\" (UID: \"56b92c9c-7b96-4956-a902-e38c8a3f4cc7\") " pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.509481 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:10 crc kubenswrapper[4685]: I0128 12:35:10.708152 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-94bvn"] Jan 28 12:35:11 crc kubenswrapper[4685]: I0128 12:35:11.184059 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" event={"ID":"56b92c9c-7b96-4956-a902-e38c8a3f4cc7","Type":"ContainerStarted","Data":"37d0718de27f001b60bad5f2d860a85eb6bd2f519aa86c7357748dc9c094b3fd"} Jan 28 12:35:11 crc kubenswrapper[4685]: I0128 12:35:11.184543 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" event={"ID":"56b92c9c-7b96-4956-a902-e38c8a3f4cc7","Type":"ContainerStarted","Data":"67ea0c78500e0dd4b97c281ec08e020eecb1a0744d60d797dfdf0187a803a811"} Jan 28 12:35:11 crc kubenswrapper[4685]: I0128 12:35:11.209501 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" podStartSLOduration=1.209478343 podStartE2EDuration="1.209478343s" podCreationTimestamp="2026-01-28 12:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:35:11.205989414 +0000 UTC m=+862.293403269" watchObservedRunningTime="2026-01-28 12:35:11.209478343 +0000 UTC m=+862.296892188" Jan 28 12:35:12 crc kubenswrapper[4685]: I0128 12:35:12.191764 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:30 crc kubenswrapper[4685]: I0128 12:35:30.514450 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-94bvn" Jan 28 12:35:30 crc kubenswrapper[4685]: I0128 12:35:30.573325 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-dxl25"] Jan 28 12:35:55 crc kubenswrapper[4685]: I0128 12:35:55.612697 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" podUID="46b82026-d586-40b2-ad5b-fc08674d7067" containerName="registry" containerID="cri-o://a9a1cbdb362a1c93b929fa546efa46c12ded9a67a6833ef11a13ee18f15a575d" gracePeriod=30 Jan 28 12:35:55 crc kubenswrapper[4685]: I0128 12:35:55.943756 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.097627 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"46b82026-d586-40b2-ad5b-fc08674d7067\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.097682 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/46b82026-d586-40b2-ad5b-fc08674d7067-registry-tls\") pod \"46b82026-d586-40b2-ad5b-fc08674d7067\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.097709 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxsgk\" (UniqueName: \"kubernetes.io/projected/46b82026-d586-40b2-ad5b-fc08674d7067-kube-api-access-wxsgk\") pod \"46b82026-d586-40b2-ad5b-fc08674d7067\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.097743 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/46b82026-d586-40b2-ad5b-fc08674d7067-bound-sa-token\") pod \"46b82026-d586-40b2-ad5b-fc08674d7067\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.097809 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/46b82026-d586-40b2-ad5b-fc08674d7067-installation-pull-secrets\") pod \"46b82026-d586-40b2-ad5b-fc08674d7067\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.097857 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/46b82026-d586-40b2-ad5b-fc08674d7067-registry-certificates\") pod \"46b82026-d586-40b2-ad5b-fc08674d7067\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.097883 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/46b82026-d586-40b2-ad5b-fc08674d7067-ca-trust-extracted\") pod \"46b82026-d586-40b2-ad5b-fc08674d7067\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.097936 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/46b82026-d586-40b2-ad5b-fc08674d7067-trusted-ca\") pod \"46b82026-d586-40b2-ad5b-fc08674d7067\" (UID: \"46b82026-d586-40b2-ad5b-fc08674d7067\") " Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.099080 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46b82026-d586-40b2-ad5b-fc08674d7067-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "46b82026-d586-40b2-ad5b-fc08674d7067" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.099127 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46b82026-d586-40b2-ad5b-fc08674d7067-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "46b82026-d586-40b2-ad5b-fc08674d7067" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.102895 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46b82026-d586-40b2-ad5b-fc08674d7067-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "46b82026-d586-40b2-ad5b-fc08674d7067" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.103143 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46b82026-d586-40b2-ad5b-fc08674d7067-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "46b82026-d586-40b2-ad5b-fc08674d7067" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.103746 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46b82026-d586-40b2-ad5b-fc08674d7067-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "46b82026-d586-40b2-ad5b-fc08674d7067" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.106816 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46b82026-d586-40b2-ad5b-fc08674d7067-kube-api-access-wxsgk" (OuterVolumeSpecName: "kube-api-access-wxsgk") pod "46b82026-d586-40b2-ad5b-fc08674d7067" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067"). InnerVolumeSpecName "kube-api-access-wxsgk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.116082 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46b82026-d586-40b2-ad5b-fc08674d7067-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "46b82026-d586-40b2-ad5b-fc08674d7067" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.131573 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "46b82026-d586-40b2-ad5b-fc08674d7067" (UID: "46b82026-d586-40b2-ad5b-fc08674d7067"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.200670 4685 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/46b82026-d586-40b2-ad5b-fc08674d7067-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.200722 4685 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/46b82026-d586-40b2-ad5b-fc08674d7067-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.200739 4685 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/46b82026-d586-40b2-ad5b-fc08674d7067-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.200751 4685 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/46b82026-d586-40b2-ad5b-fc08674d7067-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.200764 4685 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/46b82026-d586-40b2-ad5b-fc08674d7067-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.200775 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxsgk\" (UniqueName: \"kubernetes.io/projected/46b82026-d586-40b2-ad5b-fc08674d7067-kube-api-access-wxsgk\") on node \"crc\" DevicePath \"\"" Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.200787 4685 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/46b82026-d586-40b2-ad5b-fc08674d7067-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.486007 4685 generic.go:334] "Generic (PLEG): container finished" podID="46b82026-d586-40b2-ad5b-fc08674d7067" containerID="a9a1cbdb362a1c93b929fa546efa46c12ded9a67a6833ef11a13ee18f15a575d" exitCode=0 Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.486067 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" event={"ID":"46b82026-d586-40b2-ad5b-fc08674d7067","Type":"ContainerDied","Data":"a9a1cbdb362a1c93b929fa546efa46c12ded9a67a6833ef11a13ee18f15a575d"} Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.486088 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.486116 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-dxl25" event={"ID":"46b82026-d586-40b2-ad5b-fc08674d7067","Type":"ContainerDied","Data":"ddb43fb78c7d23ef980b6048567ed165fef5a9cf5560c0c6e5cb5f18f470338f"} Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.486244 4685 scope.go:117] "RemoveContainer" containerID="a9a1cbdb362a1c93b929fa546efa46c12ded9a67a6833ef11a13ee18f15a575d" Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.511846 4685 scope.go:117] "RemoveContainer" containerID="a9a1cbdb362a1c93b929fa546efa46c12ded9a67a6833ef11a13ee18f15a575d" Jan 28 12:35:56 crc kubenswrapper[4685]: E0128 12:35:56.512341 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9a1cbdb362a1c93b929fa546efa46c12ded9a67a6833ef11a13ee18f15a575d\": container with ID starting with a9a1cbdb362a1c93b929fa546efa46c12ded9a67a6833ef11a13ee18f15a575d not found: ID does not exist" containerID="a9a1cbdb362a1c93b929fa546efa46c12ded9a67a6833ef11a13ee18f15a575d" Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.512389 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9a1cbdb362a1c93b929fa546efa46c12ded9a67a6833ef11a13ee18f15a575d"} err="failed to get container status \"a9a1cbdb362a1c93b929fa546efa46c12ded9a67a6833ef11a13ee18f15a575d\": rpc error: code = NotFound desc = could not find container \"a9a1cbdb362a1c93b929fa546efa46c12ded9a67a6833ef11a13ee18f15a575d\": container with ID starting with a9a1cbdb362a1c93b929fa546efa46c12ded9a67a6833ef11a13ee18f15a575d not found: ID does not exist" Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.522436 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-dxl25"] Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.532358 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-dxl25"] Jan 28 12:35:56 crc kubenswrapper[4685]: I0128 12:35:56.558639 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46b82026-d586-40b2-ad5b-fc08674d7067" path="/var/lib/kubelet/pods/46b82026-d586-40b2-ad5b-fc08674d7067/volumes" Jan 28 12:36:57 crc kubenswrapper[4685]: I0128 12:36:57.070244 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:36:57 crc kubenswrapper[4685]: I0128 12:36:57.071192 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:37:27 crc kubenswrapper[4685]: I0128 12:37:27.069254 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 
28 12:37:27 crc kubenswrapper[4685]: I0128 12:37:27.070297 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:37:32 crc kubenswrapper[4685]: I0128 12:37:32.595866 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-b85rl"] Jan 28 12:37:32 crc kubenswrapper[4685]: I0128 12:37:32.597831 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="sbdb" containerID="cri-o://4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78" gracePeriod=30 Jan 28 12:37:32 crc kubenswrapper[4685]: I0128 12:37:32.597860 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183" gracePeriod=30 Jan 28 12:37:32 crc kubenswrapper[4685]: I0128 12:37:32.597908 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="northd" containerID="cri-o://ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7" gracePeriod=30 Jan 28 12:37:32 crc kubenswrapper[4685]: I0128 12:37:32.597923 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovn-acl-logging" containerID="cri-o://720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88" gracePeriod=30 Jan 28 12:37:32 crc kubenswrapper[4685]: I0128 12:37:32.597896 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="kube-rbac-proxy-node" containerID="cri-o://980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e" gracePeriod=30 Jan 28 12:37:32 crc kubenswrapper[4685]: I0128 12:37:32.598306 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="nbdb" containerID="cri-o://757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12" gracePeriod=30 Jan 28 12:37:32 crc kubenswrapper[4685]: I0128 12:37:32.597154 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovn-controller" containerID="cri-o://750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c" gracePeriod=30 Jan 28 12:37:32 crc kubenswrapper[4685]: I0128 12:37:32.635499 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovnkube-controller" containerID="cri-o://8b810b25ef2bcbe754ab1930aa8730f24bfaa1065df52b08a31ee753d2a478fb" gracePeriod=30 Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.095955 
4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-b85rl_6fd64f35-81dc-4978-84e8-a746e9a79ccd/ovnkube-controller/2.log" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.099014 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-b85rl_6fd64f35-81dc-4978-84e8-a746e9a79ccd/ovn-acl-logging/0.log" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.099560 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-b85rl_6fd64f35-81dc-4978-84e8-a746e9a79ccd/ovn-controller/0.log" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.100023 4685 generic.go:334] "Generic (PLEG): container finished" podID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerID="8b810b25ef2bcbe754ab1930aa8730f24bfaa1065df52b08a31ee753d2a478fb" exitCode=0 Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.100192 4685 generic.go:334] "Generic (PLEG): container finished" podID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerID="4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78" exitCode=0 Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.100281 4685 generic.go:334] "Generic (PLEG): container finished" podID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerID="757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12" exitCode=0 Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.100371 4685 generic.go:334] "Generic (PLEG): container finished" podID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerID="bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183" exitCode=0 Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.100392 4685 generic.go:334] "Generic (PLEG): container finished" podID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerID="980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e" exitCode=0 Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.100407 4685 generic.go:334] "Generic (PLEG): container finished" podID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerID="720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88" exitCode=143 Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.100421 4685 generic.go:334] "Generic (PLEG): container finished" podID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerID="750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c" exitCode=143 Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.100038 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerDied","Data":"8b810b25ef2bcbe754ab1930aa8730f24bfaa1065df52b08a31ee753d2a478fb"} Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.100472 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerDied","Data":"4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78"} Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.100488 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerDied","Data":"757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12"} Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.100499 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" 
event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerDied","Data":"bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183"} Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.100511 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerDied","Data":"980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e"} Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.100522 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerDied","Data":"720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88"} Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.100533 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerDied","Data":"750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c"} Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.100552 4685 scope.go:117] "RemoveContainer" containerID="426526fda28c0cc148c7320abdc7ec3f06c335f12e7adc4d80f28f4b8fb6fda1" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.102786 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rrnv6_28aac5d8-57ac-4302-ab17-c07f33fcaffd/kube-multus/1.log" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.103394 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rrnv6_28aac5d8-57ac-4302-ab17-c07f33fcaffd/kube-multus/0.log" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.103437 4685 generic.go:334] "Generic (PLEG): container finished" podID="28aac5d8-57ac-4302-ab17-c07f33fcaffd" containerID="ecaf6ac86fff546861ecdfe2860a2f6c859ee43807bd8a8384c9567315300893" exitCode=2 Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.103462 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rrnv6" event={"ID":"28aac5d8-57ac-4302-ab17-c07f33fcaffd","Type":"ContainerDied","Data":"ecaf6ac86fff546861ecdfe2860a2f6c859ee43807bd8a8384c9567315300893"} Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.103900 4685 scope.go:117] "RemoveContainer" containerID="ecaf6ac86fff546861ecdfe2860a2f6c859ee43807bd8a8384c9567315300893" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.241066 4685 scope.go:117] "RemoveContainer" containerID="b2cbd3507fb24e0d5b63de22fdb569fbcea539faeec524c4baf854910e92b4c8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.846748 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-b85rl_6fd64f35-81dc-4978-84e8-a746e9a79ccd/ovn-acl-logging/0.log" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.849496 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-b85rl_6fd64f35-81dc-4978-84e8-a746e9a79ccd/ovn-controller/0.log" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.850052 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.901717 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-ntkm8"] Jan 28 12:37:33 crc kubenswrapper[4685]: E0128 12:37:33.904257 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovnkube-controller" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904283 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovnkube-controller" Jan 28 12:37:33 crc kubenswrapper[4685]: E0128 12:37:33.904293 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovnkube-controller" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904299 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovnkube-controller" Jan 28 12:37:33 crc kubenswrapper[4685]: E0128 12:37:33.904307 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovn-acl-logging" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904313 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovn-acl-logging" Jan 28 12:37:33 crc kubenswrapper[4685]: E0128 12:37:33.904321 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46b82026-d586-40b2-ad5b-fc08674d7067" containerName="registry" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904327 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="46b82026-d586-40b2-ad5b-fc08674d7067" containerName="registry" Jan 28 12:37:33 crc kubenswrapper[4685]: E0128 12:37:33.904334 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="kubecfg-setup" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904340 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="kubecfg-setup" Jan 28 12:37:33 crc kubenswrapper[4685]: E0128 12:37:33.904347 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="northd" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904354 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="northd" Jan 28 12:37:33 crc kubenswrapper[4685]: E0128 12:37:33.904362 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovnkube-controller" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904368 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovnkube-controller" Jan 28 12:37:33 crc kubenswrapper[4685]: E0128 12:37:33.904381 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="kube-rbac-proxy-node" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904387 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="kube-rbac-proxy-node" Jan 28 12:37:33 crc kubenswrapper[4685]: E0128 12:37:33.904403 4685 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="kube-rbac-proxy-ovn-metrics" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904414 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="kube-rbac-proxy-ovn-metrics" Jan 28 12:37:33 crc kubenswrapper[4685]: E0128 12:37:33.904420 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="nbdb" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904426 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="nbdb" Jan 28 12:37:33 crc kubenswrapper[4685]: E0128 12:37:33.904435 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovn-controller" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904441 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovn-controller" Jan 28 12:37:33 crc kubenswrapper[4685]: E0128 12:37:33.904453 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="sbdb" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904459 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="sbdb" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904577 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="kube-rbac-proxy-ovn-metrics" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904585 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="nbdb" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904594 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovnkube-controller" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904601 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="northd" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904607 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovnkube-controller" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904614 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovn-controller" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904621 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="kube-rbac-proxy-node" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904627 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="sbdb" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904634 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovnkube-controller" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904645 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="46b82026-d586-40b2-ad5b-fc08674d7067" containerName="registry" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904651 4685 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovn-acl-logging" Jan 28 12:37:33 crc kubenswrapper[4685]: E0128 12:37:33.904747 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovnkube-controller" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904754 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovnkube-controller" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.904855 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerName="ovnkube-controller" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.906342 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924143 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-cni-bin\") pod \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924187 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-systemd-units\") pod \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924204 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-run-openvswitch\") pod \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924225 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-node-log\") pod \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924248 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6fd64f35-81dc-4978-84e8-a746e9a79ccd-env-overrides\") pod \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924261 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-run-systemd\") pod \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924277 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6fd64f35-81dc-4978-84e8-a746e9a79ccd-ovnkube-config\") pod \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924296 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: 
\"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-cni-netd\") pod \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924313 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-run-netns\") pod \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924332 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-slash\") pod \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924360 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6fd64f35-81dc-4978-84e8-a746e9a79ccd-ovn-node-metrics-cert\") pod \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924377 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-run-ovn\") pod \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924392 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-var-lib-openvswitch\") pod \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924407 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-run-ovn-kubernetes\") pod \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924424 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-kubelet\") pod \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924442 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-862jf\" (UniqueName: \"kubernetes.io/projected/6fd64f35-81dc-4978-84e8-a746e9a79ccd-kube-api-access-862jf\") pod \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924455 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-log-socket\") pod \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924476 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" 
(UniqueName: \"kubernetes.io/configmap/6fd64f35-81dc-4978-84e8-a746e9a79ccd-ovnkube-script-lib\") pod \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924496 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-var-lib-cni-networks-ovn-kubernetes\") pod \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924513 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-etc-openvswitch\") pod \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\" (UID: \"6fd64f35-81dc-4978-84e8-a746e9a79ccd\") " Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924599 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-kubelet\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924622 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-run-openvswitch\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924641 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/34455271-ba47-4cca-8d11-43df49813020-env-overrides\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924659 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qb5v2\" (UniqueName: \"kubernetes.io/projected/34455271-ba47-4cca-8d11-43df49813020-kube-api-access-qb5v2\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924681 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/34455271-ba47-4cca-8d11-43df49813020-ovn-node-metrics-cert\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924697 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-cni-netd\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924713 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-etc-openvswitch\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924725 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/34455271-ba47-4cca-8d11-43df49813020-ovnkube-config\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924740 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-run-systemd\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924759 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-cni-bin\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924775 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-systemd-units\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924789 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-run-netns\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924806 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-run-ovn-kubernetes\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924826 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924851 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/34455271-ba47-4cca-8d11-43df49813020-ovnkube-script-lib\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc 
kubenswrapper[4685]: I0128 12:37:33.924865 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-var-lib-openvswitch\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924879 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-run-ovn\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924893 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-node-log\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924910 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-log-socket\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924926 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-slash\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.924991 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "6fd64f35-81dc-4978-84e8-a746e9a79ccd" (UID: "6fd64f35-81dc-4978-84e8-a746e9a79ccd"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.925013 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "6fd64f35-81dc-4978-84e8-a746e9a79ccd" (UID: "6fd64f35-81dc-4978-84e8-a746e9a79ccd"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.925029 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "6fd64f35-81dc-4978-84e8-a746e9a79ccd" (UID: "6fd64f35-81dc-4978-84e8-a746e9a79ccd"). InnerVolumeSpecName "run-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.925044 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-node-log" (OuterVolumeSpecName: "node-log") pod "6fd64f35-81dc-4978-84e8-a746e9a79ccd" (UID: "6fd64f35-81dc-4978-84e8-a746e9a79ccd"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.925419 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6fd64f35-81dc-4978-84e8-a746e9a79ccd-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6fd64f35-81dc-4978-84e8-a746e9a79ccd" (UID: "6fd64f35-81dc-4978-84e8-a746e9a79ccd"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.926287 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "6fd64f35-81dc-4978-84e8-a746e9a79ccd" (UID: "6fd64f35-81dc-4978-84e8-a746e9a79ccd"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.926407 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "6fd64f35-81dc-4978-84e8-a746e9a79ccd" (UID: "6fd64f35-81dc-4978-84e8-a746e9a79ccd"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.926495 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "6fd64f35-81dc-4978-84e8-a746e9a79ccd" (UID: "6fd64f35-81dc-4978-84e8-a746e9a79ccd"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.926634 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "6fd64f35-81dc-4978-84e8-a746e9a79ccd" (UID: "6fd64f35-81dc-4978-84e8-a746e9a79ccd"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.926669 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-slash" (OuterVolumeSpecName: "host-slash") pod "6fd64f35-81dc-4978-84e8-a746e9a79ccd" (UID: "6fd64f35-81dc-4978-84e8-a746e9a79ccd"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.926818 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "6fd64f35-81dc-4978-84e8-a746e9a79ccd" (UID: "6fd64f35-81dc-4978-84e8-a746e9a79ccd"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.927376 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "6fd64f35-81dc-4978-84e8-a746e9a79ccd" (UID: "6fd64f35-81dc-4978-84e8-a746e9a79ccd"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.927454 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6fd64f35-81dc-4978-84e8-a746e9a79ccd-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6fd64f35-81dc-4978-84e8-a746e9a79ccd" (UID: "6fd64f35-81dc-4978-84e8-a746e9a79ccd"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.927485 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "6fd64f35-81dc-4978-84e8-a746e9a79ccd" (UID: "6fd64f35-81dc-4978-84e8-a746e9a79ccd"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.927642 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "6fd64f35-81dc-4978-84e8-a746e9a79ccd" (UID: "6fd64f35-81dc-4978-84e8-a746e9a79ccd"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.928000 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-log-socket" (OuterVolumeSpecName: "log-socket") pod "6fd64f35-81dc-4978-84e8-a746e9a79ccd" (UID: "6fd64f35-81dc-4978-84e8-a746e9a79ccd"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.928394 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6fd64f35-81dc-4978-84e8-a746e9a79ccd-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6fd64f35-81dc-4978-84e8-a746e9a79ccd" (UID: "6fd64f35-81dc-4978-84e8-a746e9a79ccd"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.932905 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fd64f35-81dc-4978-84e8-a746e9a79ccd-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6fd64f35-81dc-4978-84e8-a746e9a79ccd" (UID: "6fd64f35-81dc-4978-84e8-a746e9a79ccd"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.934381 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fd64f35-81dc-4978-84e8-a746e9a79ccd-kube-api-access-862jf" (OuterVolumeSpecName: "kube-api-access-862jf") pod "6fd64f35-81dc-4978-84e8-a746e9a79ccd" (UID: "6fd64f35-81dc-4978-84e8-a746e9a79ccd"). InnerVolumeSpecName "kube-api-access-862jf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:37:33 crc kubenswrapper[4685]: I0128 12:37:33.953655 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "6fd64f35-81dc-4978-84e8-a746e9a79ccd" (UID: "6fd64f35-81dc-4978-84e8-a746e9a79ccd"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.025471 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-run-openvswitch\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.025751 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/34455271-ba47-4cca-8d11-43df49813020-env-overrides\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.025776 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qb5v2\" (UniqueName: \"kubernetes.io/projected/34455271-ba47-4cca-8d11-43df49813020-kube-api-access-qb5v2\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.025798 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/34455271-ba47-4cca-8d11-43df49813020-ovn-node-metrics-cert\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.025828 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-cni-netd\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.025859 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-etc-openvswitch\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.025878 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/34455271-ba47-4cca-8d11-43df49813020-ovnkube-config\") pod 
\"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.025899 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-run-systemd\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.025924 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-cni-bin\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.025963 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-systemd-units\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.025984 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-run-netns\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026008 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-run-ovn-kubernetes\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026036 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026075 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/34455271-ba47-4cca-8d11-43df49813020-ovnkube-script-lib\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026095 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-var-lib-openvswitch\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026116 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-run-ovn\") pod \"ovnkube-node-ntkm8\" (UID: 
\"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026136 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-node-log\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026154 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-cni-bin\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026164 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-log-socket\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026213 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-log-socket\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026230 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-slash\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026256 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-systemd-units\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026259 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-kubelet\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026282 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-kubelet\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026323 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-cni-netd\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.025603 4685 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-run-openvswitch\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026332 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-run-netns\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026345 4685 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-node-log\") on node \"crc\" DevicePath \"\"" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026366 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-etc-openvswitch\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026384 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/34455271-ba47-4cca-8d11-43df49813020-env-overrides\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026821 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-node-log\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026842 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-run-ovn\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026866 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026886 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-run-ovn-kubernetes\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026951 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/34455271-ba47-4cca-8d11-43df49813020-ovnkube-config\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.026984 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-run-systemd\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027005 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-host-slash\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027028 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/34455271-ba47-4cca-8d11-43df49813020-var-lib-openvswitch\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027045 4685 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6fd64f35-81dc-4978-84e8-a746e9a79ccd-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027212 4685 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027228 4685 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6fd64f35-81dc-4978-84e8-a746e9a79ccd-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027241 4685 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027253 4685 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027265 4685 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-slash\") on node \"crc\" DevicePath \"\"" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027276 4685 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6fd64f35-81dc-4978-84e8-a746e9a79ccd-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027289 4685 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027300 4685 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-run-ovn\") on node 
\"crc\" DevicePath \"\"" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027310 4685 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027322 4685 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027332 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-862jf\" (UniqueName: \"kubernetes.io/projected/6fd64f35-81dc-4978-84e8-a746e9a79ccd-kube-api-access-862jf\") on node \"crc\" DevicePath \"\"" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027343 4685 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-log-socket\") on node \"crc\" DevicePath \"\"" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027355 4685 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6fd64f35-81dc-4978-84e8-a746e9a79ccd-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027367 4685 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027378 4685 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027391 4685 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027395 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/34455271-ba47-4cca-8d11-43df49813020-ovnkube-script-lib\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027401 4685 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.027411 4685 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fd64f35-81dc-4978-84e8-a746e9a79ccd-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.030010 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/34455271-ba47-4cca-8d11-43df49813020-ovn-node-metrics-cert\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" 
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.041423 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qb5v2\" (UniqueName: \"kubernetes.io/projected/34455271-ba47-4cca-8d11-43df49813020-kube-api-access-qb5v2\") pod \"ovnkube-node-ntkm8\" (UID: \"34455271-ba47-4cca-8d11-43df49813020\") " pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.109834 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rrnv6_28aac5d8-57ac-4302-ab17-c07f33fcaffd/kube-multus/1.log"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.110090 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rrnv6" event={"ID":"28aac5d8-57ac-4302-ab17-c07f33fcaffd","Type":"ContainerStarted","Data":"57b1a1868adbcba1c64cc07bbec7fe8b6ece1987e498c113bc9b914cba235334"}
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.115570 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-b85rl_6fd64f35-81dc-4978-84e8-a746e9a79ccd/ovn-acl-logging/0.log"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.116109 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-b85rl_6fd64f35-81dc-4978-84e8-a746e9a79ccd/ovn-controller/0.log"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.116547 4685 generic.go:334] "Generic (PLEG): container finished" podID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" containerID="ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7" exitCode=0
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.116599 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerDied","Data":"ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7"}
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.116625 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl" event={"ID":"6fd64f35-81dc-4978-84e8-a746e9a79ccd","Type":"ContainerDied","Data":"08cf184a0195cda1de29e3a1d361189133ca8c6a64f916f37b2cafecfe2c6852"}
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.116645 4685 scope.go:117] "RemoveContainer" containerID="8b810b25ef2bcbe754ab1930aa8730f24bfaa1065df52b08a31ee753d2a478fb"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.116761 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-b85rl"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.162057 4685 scope.go:117] "RemoveContainer" containerID="4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.164433 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-b85rl"]
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.169555 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-b85rl"]
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.181665 4685 scope.go:117] "RemoveContainer" containerID="757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.195398 4685 scope.go:117] "RemoveContainer" containerID="ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.210319 4685 scope.go:117] "RemoveContainer" containerID="bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.220570 4685 scope.go:117] "RemoveContainer" containerID="980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.223265 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.238327 4685 scope.go:117] "RemoveContainer" containerID="720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88"
Jan 28 12:37:34 crc kubenswrapper[4685]: W0128 12:37:34.243101 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod34455271_ba47_4cca_8d11_43df49813020.slice/crio-f0351e245b145eadab3771b9b86c09a09ba6103fa8eb9a6eee92e4dd8b8e949b WatchSource:0}: Error finding container f0351e245b145eadab3771b9b86c09a09ba6103fa8eb9a6eee92e4dd8b8e949b: Status 404 returned error can't find the container with id f0351e245b145eadab3771b9b86c09a09ba6103fa8eb9a6eee92e4dd8b8e949b
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.251245 4685 scope.go:117] "RemoveContainer" containerID="750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.266427 4685 scope.go:117] "RemoveContainer" containerID="62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.281573 4685 scope.go:117] "RemoveContainer" containerID="8b810b25ef2bcbe754ab1930aa8730f24bfaa1065df52b08a31ee753d2a478fb"
Jan 28 12:37:34 crc kubenswrapper[4685]: E0128 12:37:34.282036 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b810b25ef2bcbe754ab1930aa8730f24bfaa1065df52b08a31ee753d2a478fb\": container with ID starting with 8b810b25ef2bcbe754ab1930aa8730f24bfaa1065df52b08a31ee753d2a478fb not found: ID does not exist" containerID="8b810b25ef2bcbe754ab1930aa8730f24bfaa1065df52b08a31ee753d2a478fb"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.282129 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b810b25ef2bcbe754ab1930aa8730f24bfaa1065df52b08a31ee753d2a478fb"} err="failed to get container status \"8b810b25ef2bcbe754ab1930aa8730f24bfaa1065df52b08a31ee753d2a478fb\": rpc error: code = NotFound desc = could not find container \"8b810b25ef2bcbe754ab1930aa8730f24bfaa1065df52b08a31ee753d2a478fb\": container with ID starting with 8b810b25ef2bcbe754ab1930aa8730f24bfaa1065df52b08a31ee753d2a478fb not found: ID does not exist"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.282248 4685 scope.go:117] "RemoveContainer" containerID="4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78"
Jan 28 12:37:34 crc kubenswrapper[4685]: E0128 12:37:34.282526 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\": container with ID starting with 4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78 not found: ID does not exist" containerID="4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.282616 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78"} err="failed to get container status \"4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\": rpc error: code = NotFound desc = could not find container \"4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78\": container with ID starting with 4ca2cd27ed42277b4da09046487fac202d4dbd6653e107aea64641d070f62b78 not found: ID does not exist"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.282685 4685 scope.go:117] "RemoveContainer" containerID="757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12"
Jan 28 12:37:34 crc kubenswrapper[4685]: E0128 12:37:34.283047 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\": container with ID starting with 757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12 not found: ID does not exist" containerID="757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.283131 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12"} err="failed to get container status \"757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\": rpc error: code = NotFound desc = could not find container \"757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12\": container with ID starting with 757589d36875f7ed6d437a9cf69d5591d19c64f5283a10cab5bdf33813008f12 not found: ID does not exist"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.283225 4685 scope.go:117] "RemoveContainer" containerID="ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7"
Jan 28 12:37:34 crc kubenswrapper[4685]: E0128 12:37:34.283508 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\": container with ID starting with ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7 not found: ID does not exist" containerID="ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.283534 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7"} err="failed to get container status \"ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\": rpc error: code = NotFound desc = could not find container \"ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7\": container with ID starting with ffc700988af9f6fdc8d360f7b314511b2530cc200300de8fc231fd2a080ca2d7 not found: ID does not exist"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.283551 4685 scope.go:117] "RemoveContainer" containerID="bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183"
Jan 28 12:37:34 crc kubenswrapper[4685]: E0128 12:37:34.283760 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\": container with ID starting with bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183 not found: ID does not exist" containerID="bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.283859 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183"} err="failed to get container status \"bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\": rpc error: code = NotFound desc = could not find container \"bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183\": container with ID starting with bc634e627ab757db68eb992306f16abad63215752c900223ead9683d8f325183 not found: ID does not exist"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.283920 4685 scope.go:117] "RemoveContainer" containerID="980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e"
Jan 28 12:37:34 crc kubenswrapper[4685]: E0128 12:37:34.284173 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\": container with ID starting with 980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e not found: ID does not exist" containerID="980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.284273 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e"} err="failed to get container status \"980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\": rpc error: code = NotFound desc = could not find container \"980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e\": container with ID starting with 980aa84ac582070cc735ed9adcac760c5fd9df91368f0b50b151ef65b0f6cd8e not found: ID does not exist"
Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.284338 4685 scope.go:117] "RemoveContainer" containerID="720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88"
Jan 28 12:37:34 crc kubenswrapper[4685]: E0128 12:37:34.284641 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\": container with ID starting with 720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88 not found: ID does not exist" containerID="720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88"
containerID="720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.284714 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88"} err="failed to get container status \"720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\": rpc error: code = NotFound desc = could not find container \"720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88\": container with ID starting with 720a8708998bf446272a7dd0ba606cba7d1b4343c4db9f9cd0f830ba4aebca88 not found: ID does not exist" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.284778 4685 scope.go:117] "RemoveContainer" containerID="750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c" Jan 28 12:37:34 crc kubenswrapper[4685]: E0128 12:37:34.285004 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\": container with ID starting with 750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c not found: ID does not exist" containerID="750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.285075 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c"} err="failed to get container status \"750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\": rpc error: code = NotFound desc = could not find container \"750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c\": container with ID starting with 750466ef42f1388e20b44369bf7a70c00ccb19b202c278cb7634dda6411f219c not found: ID does not exist" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.285133 4685 scope.go:117] "RemoveContainer" containerID="62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c" Jan 28 12:37:34 crc kubenswrapper[4685]: E0128 12:37:34.285450 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\": container with ID starting with 62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c not found: ID does not exist" containerID="62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.285524 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c"} err="failed to get container status \"62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\": rpc error: code = NotFound desc = could not find container \"62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c\": container with ID starting with 62f300d30d67f266b966f74b893e901c5453efb253ae5cdab5d68ab23bfd925c not found: ID does not exist" Jan 28 12:37:34 crc kubenswrapper[4685]: I0128 12:37:34.554678 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fd64f35-81dc-4978-84e8-a746e9a79ccd" path="/var/lib/kubelet/pods/6fd64f35-81dc-4978-84e8-a746e9a79ccd/volumes" Jan 28 12:37:35 crc kubenswrapper[4685]: I0128 12:37:35.124320 4685 generic.go:334] "Generic (PLEG): container finished" 
podID="34455271-ba47-4cca-8d11-43df49813020" containerID="a1c6093dc9a065749c068317b8c5e264508f56385c591c23ea127fdf6a725250" exitCode=0 Jan 28 12:37:35 crc kubenswrapper[4685]: I0128 12:37:35.124422 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" event={"ID":"34455271-ba47-4cca-8d11-43df49813020","Type":"ContainerDied","Data":"a1c6093dc9a065749c068317b8c5e264508f56385c591c23ea127fdf6a725250"} Jan 28 12:37:35 crc kubenswrapper[4685]: I0128 12:37:35.124626 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" event={"ID":"34455271-ba47-4cca-8d11-43df49813020","Type":"ContainerStarted","Data":"f0351e245b145eadab3771b9b86c09a09ba6103fa8eb9a6eee92e4dd8b8e949b"} Jan 28 12:37:36 crc kubenswrapper[4685]: I0128 12:37:36.136708 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" event={"ID":"34455271-ba47-4cca-8d11-43df49813020","Type":"ContainerStarted","Data":"787d3c7949c4c8fd3797c3ea03352de684ab7286b9921475768d88e465fea201"} Jan 28 12:37:36 crc kubenswrapper[4685]: I0128 12:37:36.137080 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" event={"ID":"34455271-ba47-4cca-8d11-43df49813020","Type":"ContainerStarted","Data":"d0aa0d06ce4c15dce95462d22cd67ead7b595710fbda0853cef3e3ac3f58c572"} Jan 28 12:37:36 crc kubenswrapper[4685]: I0128 12:37:36.137096 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" event={"ID":"34455271-ba47-4cca-8d11-43df49813020","Type":"ContainerStarted","Data":"d532d7824dec306945cd8a54397f61debb7a5465d4972275f88088f43645b652"} Jan 28 12:37:36 crc kubenswrapper[4685]: I0128 12:37:36.137108 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" event={"ID":"34455271-ba47-4cca-8d11-43df49813020","Type":"ContainerStarted","Data":"58da601ef573ae961314412cd7065feb09ce61db8c2c6b6f1b7c0752d53c49bf"} Jan 28 12:37:37 crc kubenswrapper[4685]: I0128 12:37:37.151307 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" event={"ID":"34455271-ba47-4cca-8d11-43df49813020","Type":"ContainerStarted","Data":"a3aa30a2d18a320d8d33cc4a2e986382dfba252ee56d5b60211855d79ac24b7a"} Jan 28 12:37:37 crc kubenswrapper[4685]: I0128 12:37:37.153450 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" event={"ID":"34455271-ba47-4cca-8d11-43df49813020","Type":"ContainerStarted","Data":"ddbf482e91d41724ac014d1bb32ad3e1d6c8fcb4f98403c5cc0c38651da2c9f8"} Jan 28 12:37:37 crc kubenswrapper[4685]: I0128 12:37:37.153517 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" event={"ID":"34455271-ba47-4cca-8d11-43df49813020","Type":"ContainerStarted","Data":"cd67084031fbd807a339ac80741d82b98295fc867e1e723a43f541d07734838c"} Jan 28 12:37:43 crc kubenswrapper[4685]: I0128 12:37:43.191744 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" event={"ID":"34455271-ba47-4cca-8d11-43df49813020","Type":"ContainerStarted","Data":"02d83640493a1743ef011be8ef57840e8abe08b0f55821344985906f32fac75a"} Jan 28 12:37:44 crc kubenswrapper[4685]: I0128 12:37:44.197243 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:44 crc kubenswrapper[4685]: 
I0128 12:37:44.197286 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:44 crc kubenswrapper[4685]: I0128 12:37:44.223620 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:44 crc kubenswrapper[4685]: I0128 12:37:44.259394 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:44 crc kubenswrapper[4685]: I0128 12:37:44.259454 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:37:44 crc kubenswrapper[4685]: I0128 12:37:44.295669 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" podStartSLOduration=11.295652293 podStartE2EDuration="11.295652293s" podCreationTimestamp="2026-01-28 12:37:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:37:44.2367302 +0000 UTC m=+1015.324144095" watchObservedRunningTime="2026-01-28 12:37:44.295652293 +0000 UTC m=+1015.383066128" Jan 28 12:37:57 crc kubenswrapper[4685]: I0128 12:37:57.069704 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:37:57 crc kubenswrapper[4685]: I0128 12:37:57.071277 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:37:57 crc kubenswrapper[4685]: I0128 12:37:57.071518 4685 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:37:57 crc kubenswrapper[4685]: I0128 12:37:57.072364 4685 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"121e44d10a06506539fa644a7a14ed20205554003b9b8ca8281810f57af222bf"} pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 12:37:57 crc kubenswrapper[4685]: I0128 12:37:57.072522 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" containerID="cri-o://121e44d10a06506539fa644a7a14ed20205554003b9b8ca8281810f57af222bf" gracePeriod=600 Jan 28 12:37:57 crc kubenswrapper[4685]: I0128 12:37:57.289118 4685 generic.go:334] "Generic (PLEG): container finished" podID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerID="121e44d10a06506539fa644a7a14ed20205554003b9b8ca8281810f57af222bf" exitCode=0 Jan 28 12:37:57 crc kubenswrapper[4685]: I0128 12:37:57.289180 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" 
event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerDied","Data":"121e44d10a06506539fa644a7a14ed20205554003b9b8ca8281810f57af222bf"} Jan 28 12:37:57 crc kubenswrapper[4685]: I0128 12:37:57.289219 4685 scope.go:117] "RemoveContainer" containerID="cf8410625584fb0c630118ee349784ba7de22b55afd9a004aa39caf181306381" Jan 28 12:37:58 crc kubenswrapper[4685]: I0128 12:37:58.298561 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerStarted","Data":"62facba6395aa0c2d16f630a44d637954256b42216e7c75dfe642554c0dcdb37"} Jan 28 12:38:04 crc kubenswrapper[4685]: I0128 12:38:04.258753 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-ntkm8" Jan 28 12:38:06 crc kubenswrapper[4685]: I0128 12:38:06.606636 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q"] Jan 28 12:38:06 crc kubenswrapper[4685]: I0128 12:38:06.607929 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q" Jan 28 12:38:06 crc kubenswrapper[4685]: I0128 12:38:06.611031 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 28 12:38:06 crc kubenswrapper[4685]: I0128 12:38:06.616846 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q"] Jan 28 12:38:06 crc kubenswrapper[4685]: I0128 12:38:06.671698 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2vqr\" (UniqueName: \"kubernetes.io/projected/9444ee44-142c-4fad-a9c3-9ab30638416d-kube-api-access-j2vqr\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q\" (UID: \"9444ee44-142c-4fad-a9c3-9ab30638416d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q" Jan 28 12:38:06 crc kubenswrapper[4685]: I0128 12:38:06.671774 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9444ee44-142c-4fad-a9c3-9ab30638416d-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q\" (UID: \"9444ee44-142c-4fad-a9c3-9ab30638416d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q" Jan 28 12:38:06 crc kubenswrapper[4685]: I0128 12:38:06.671843 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9444ee44-142c-4fad-a9c3-9ab30638416d-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q\" (UID: \"9444ee44-142c-4fad-a9c3-9ab30638416d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q" Jan 28 12:38:06 crc kubenswrapper[4685]: I0128 12:38:06.772739 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2vqr\" (UniqueName: \"kubernetes.io/projected/9444ee44-142c-4fad-a9c3-9ab30638416d-kube-api-access-j2vqr\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q\" (UID: \"9444ee44-142c-4fad-a9c3-9ab30638416d\") " 
pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q" Jan 28 12:38:06 crc kubenswrapper[4685]: I0128 12:38:06.773062 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9444ee44-142c-4fad-a9c3-9ab30638416d-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q\" (UID: \"9444ee44-142c-4fad-a9c3-9ab30638416d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q" Jan 28 12:38:06 crc kubenswrapper[4685]: I0128 12:38:06.773226 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9444ee44-142c-4fad-a9c3-9ab30638416d-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q\" (UID: \"9444ee44-142c-4fad-a9c3-9ab30638416d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q" Jan 28 12:38:06 crc kubenswrapper[4685]: I0128 12:38:06.773460 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9444ee44-142c-4fad-a9c3-9ab30638416d-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q\" (UID: \"9444ee44-142c-4fad-a9c3-9ab30638416d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q" Jan 28 12:38:06 crc kubenswrapper[4685]: I0128 12:38:06.773827 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9444ee44-142c-4fad-a9c3-9ab30638416d-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q\" (UID: \"9444ee44-142c-4fad-a9c3-9ab30638416d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q" Jan 28 12:38:06 crc kubenswrapper[4685]: I0128 12:38:06.792661 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2vqr\" (UniqueName: \"kubernetes.io/projected/9444ee44-142c-4fad-a9c3-9ab30638416d-kube-api-access-j2vqr\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q\" (UID: \"9444ee44-142c-4fad-a9c3-9ab30638416d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q" Jan 28 12:38:06 crc kubenswrapper[4685]: I0128 12:38:06.926641 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q" Jan 28 12:38:07 crc kubenswrapper[4685]: I0128 12:38:07.309949 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q"] Jan 28 12:38:07 crc kubenswrapper[4685]: I0128 12:38:07.344675 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q" event={"ID":"9444ee44-142c-4fad-a9c3-9ab30638416d","Type":"ContainerStarted","Data":"8b916584c2000207ba40346ab6d29d5fc414187dc101dec09f3c13385e94edf8"} Jan 28 12:38:08 crc kubenswrapper[4685]: I0128 12:38:08.352359 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q" event={"ID":"9444ee44-142c-4fad-a9c3-9ab30638416d","Type":"ContainerStarted","Data":"b601a7cbae9b644547f6b978f7647eb9cd06c8aa1075b7ab8f91045d610df6f4"} Jan 28 12:38:08 crc kubenswrapper[4685]: I0128 12:38:08.968316 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4p2sq"] Jan 28 12:38:08 crc kubenswrapper[4685]: I0128 12:38:08.969480 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4p2sq" Jan 28 12:38:08 crc kubenswrapper[4685]: I0128 12:38:08.978905 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4p2sq"] Jan 28 12:38:09 crc kubenswrapper[4685]: I0128 12:38:09.001367 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2srs\" (UniqueName: \"kubernetes.io/projected/843e2c19-08d8-4669-9a4d-0260bd34d3cf-kube-api-access-s2srs\") pod \"redhat-operators-4p2sq\" (UID: \"843e2c19-08d8-4669-9a4d-0260bd34d3cf\") " pod="openshift-marketplace/redhat-operators-4p2sq" Jan 28 12:38:09 crc kubenswrapper[4685]: I0128 12:38:09.001533 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/843e2c19-08d8-4669-9a4d-0260bd34d3cf-catalog-content\") pod \"redhat-operators-4p2sq\" (UID: \"843e2c19-08d8-4669-9a4d-0260bd34d3cf\") " pod="openshift-marketplace/redhat-operators-4p2sq" Jan 28 12:38:09 crc kubenswrapper[4685]: I0128 12:38:09.001620 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/843e2c19-08d8-4669-9a4d-0260bd34d3cf-utilities\") pod \"redhat-operators-4p2sq\" (UID: \"843e2c19-08d8-4669-9a4d-0260bd34d3cf\") " pod="openshift-marketplace/redhat-operators-4p2sq" Jan 28 12:38:09 crc kubenswrapper[4685]: I0128 12:38:09.103526 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2srs\" (UniqueName: \"kubernetes.io/projected/843e2c19-08d8-4669-9a4d-0260bd34d3cf-kube-api-access-s2srs\") pod \"redhat-operators-4p2sq\" (UID: \"843e2c19-08d8-4669-9a4d-0260bd34d3cf\") " pod="openshift-marketplace/redhat-operators-4p2sq" Jan 28 12:38:09 crc kubenswrapper[4685]: I0128 12:38:09.103610 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/843e2c19-08d8-4669-9a4d-0260bd34d3cf-catalog-content\") pod \"redhat-operators-4p2sq\" (UID: \"843e2c19-08d8-4669-9a4d-0260bd34d3cf\") " 
pod="openshift-marketplace/redhat-operators-4p2sq" Jan 28 12:38:09 crc kubenswrapper[4685]: I0128 12:38:09.103634 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/843e2c19-08d8-4669-9a4d-0260bd34d3cf-utilities\") pod \"redhat-operators-4p2sq\" (UID: \"843e2c19-08d8-4669-9a4d-0260bd34d3cf\") " pod="openshift-marketplace/redhat-operators-4p2sq" Jan 28 12:38:09 crc kubenswrapper[4685]: I0128 12:38:09.104117 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/843e2c19-08d8-4669-9a4d-0260bd34d3cf-utilities\") pod \"redhat-operators-4p2sq\" (UID: \"843e2c19-08d8-4669-9a4d-0260bd34d3cf\") " pod="openshift-marketplace/redhat-operators-4p2sq" Jan 28 12:38:09 crc kubenswrapper[4685]: I0128 12:38:09.104114 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/843e2c19-08d8-4669-9a4d-0260bd34d3cf-catalog-content\") pod \"redhat-operators-4p2sq\" (UID: \"843e2c19-08d8-4669-9a4d-0260bd34d3cf\") " pod="openshift-marketplace/redhat-operators-4p2sq" Jan 28 12:38:09 crc kubenswrapper[4685]: I0128 12:38:09.123671 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2srs\" (UniqueName: \"kubernetes.io/projected/843e2c19-08d8-4669-9a4d-0260bd34d3cf-kube-api-access-s2srs\") pod \"redhat-operators-4p2sq\" (UID: \"843e2c19-08d8-4669-9a4d-0260bd34d3cf\") " pod="openshift-marketplace/redhat-operators-4p2sq" Jan 28 12:38:09 crc kubenswrapper[4685]: I0128 12:38:09.286080 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4p2sq" Jan 28 12:38:09 crc kubenswrapper[4685]: I0128 12:38:09.361124 4685 generic.go:334] "Generic (PLEG): container finished" podID="9444ee44-142c-4fad-a9c3-9ab30638416d" containerID="b601a7cbae9b644547f6b978f7647eb9cd06c8aa1075b7ab8f91045d610df6f4" exitCode=0 Jan 28 12:38:09 crc kubenswrapper[4685]: I0128 12:38:09.361198 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q" event={"ID":"9444ee44-142c-4fad-a9c3-9ab30638416d","Type":"ContainerDied","Data":"b601a7cbae9b644547f6b978f7647eb9cd06c8aa1075b7ab8f91045d610df6f4"} Jan 28 12:38:09 crc kubenswrapper[4685]: I0128 12:38:09.363148 4685 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 12:38:09 crc kubenswrapper[4685]: I0128 12:38:09.489957 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4p2sq"] Jan 28 12:38:09 crc kubenswrapper[4685]: W0128 12:38:09.501407 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod843e2c19_08d8_4669_9a4d_0260bd34d3cf.slice/crio-de35f5e5cbf2f86561e919f1cc69880aa8eb23e8f1628428a8c4b2ab55738b5e WatchSource:0}: Error finding container de35f5e5cbf2f86561e919f1cc69880aa8eb23e8f1628428a8c4b2ab55738b5e: Status 404 returned error can't find the container with id de35f5e5cbf2f86561e919f1cc69880aa8eb23e8f1628428a8c4b2ab55738b5e Jan 28 12:38:10 crc kubenswrapper[4685]: I0128 12:38:10.367213 4685 generic.go:334] "Generic (PLEG): container finished" podID="843e2c19-08d8-4669-9a4d-0260bd34d3cf" containerID="6b81a9fc75bd130903d64a640180c7505f380bd506837705473667b708dd925d" exitCode=0 Jan 28 12:38:10 crc kubenswrapper[4685]: 
Jan 28 12:38:10 crc kubenswrapper[4685]: I0128 12:38:10.367538 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p2sq" event={"ID":"843e2c19-08d8-4669-9a4d-0260bd34d3cf","Type":"ContainerDied","Data":"6b81a9fc75bd130903d64a640180c7505f380bd506837705473667b708dd925d"}
Jan 28 12:38:10 crc kubenswrapper[4685]: I0128 12:38:10.367586 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p2sq" event={"ID":"843e2c19-08d8-4669-9a4d-0260bd34d3cf","Type":"ContainerStarted","Data":"de35f5e5cbf2f86561e919f1cc69880aa8eb23e8f1628428a8c4b2ab55738b5e"}
Jan 28 12:38:15 crc kubenswrapper[4685]: I0128 12:38:15.394847 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p2sq" event={"ID":"843e2c19-08d8-4669-9a4d-0260bd34d3cf","Type":"ContainerStarted","Data":"6688582f5831c506d7099bba9fff9ce7441a9b06f866bd0659e6c917a2cd95b6"}
Jan 28 12:38:15 crc kubenswrapper[4685]: I0128 12:38:15.397633 4685 generic.go:334] "Generic (PLEG): container finished" podID="9444ee44-142c-4fad-a9c3-9ab30638416d" containerID="7854dd4054ece8a47a16dbf7c35db3d64f6e2f04e4d842c7ec367c54114265a9" exitCode=0
Jan 28 12:38:15 crc kubenswrapper[4685]: I0128 12:38:15.397695 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q" event={"ID":"9444ee44-142c-4fad-a9c3-9ab30638416d","Type":"ContainerDied","Data":"7854dd4054ece8a47a16dbf7c35db3d64f6e2f04e4d842c7ec367c54114265a9"}
Jan 28 12:38:16 crc kubenswrapper[4685]: I0128 12:38:16.405932 4685 generic.go:334] "Generic (PLEG): container finished" podID="843e2c19-08d8-4669-9a4d-0260bd34d3cf" containerID="6688582f5831c506d7099bba9fff9ce7441a9b06f866bd0659e6c917a2cd95b6" exitCode=0
Jan 28 12:38:16 crc kubenswrapper[4685]: I0128 12:38:16.406008 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p2sq" event={"ID":"843e2c19-08d8-4669-9a4d-0260bd34d3cf","Type":"ContainerDied","Data":"6688582f5831c506d7099bba9fff9ce7441a9b06f866bd0659e6c917a2cd95b6"}
Jan 28 12:38:16 crc kubenswrapper[4685]: I0128 12:38:16.408581 4685 generic.go:334] "Generic (PLEG): container finished" podID="9444ee44-142c-4fad-a9c3-9ab30638416d" containerID="96b9587c5003f4d1f3c128bf4179e87b780a2720049a02ff9e641cce65f52a45" exitCode=0
Jan 28 12:38:16 crc kubenswrapper[4685]: I0128 12:38:16.408605 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q" event={"ID":"9444ee44-142c-4fad-a9c3-9ab30638416d","Type":"ContainerDied","Data":"96b9587c5003f4d1f3c128bf4179e87b780a2720049a02ff9e641cce65f52a45"}
Jan 28 12:38:17 crc kubenswrapper[4685]: I0128 12:38:17.646358 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q"
Jan 28 12:38:17 crc kubenswrapper[4685]: I0128 12:38:17.719974 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9444ee44-142c-4fad-a9c3-9ab30638416d-util\") pod \"9444ee44-142c-4fad-a9c3-9ab30638416d\" (UID: \"9444ee44-142c-4fad-a9c3-9ab30638416d\") "
Jan 28 12:38:17 crc kubenswrapper[4685]: I0128 12:38:17.720052 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j2vqr\" (UniqueName: \"kubernetes.io/projected/9444ee44-142c-4fad-a9c3-9ab30638416d-kube-api-access-j2vqr\") pod \"9444ee44-142c-4fad-a9c3-9ab30638416d\" (UID: \"9444ee44-142c-4fad-a9c3-9ab30638416d\") "
Jan 28 12:38:17 crc kubenswrapper[4685]: I0128 12:38:17.720080 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9444ee44-142c-4fad-a9c3-9ab30638416d-bundle\") pod \"9444ee44-142c-4fad-a9c3-9ab30638416d\" (UID: \"9444ee44-142c-4fad-a9c3-9ab30638416d\") "
Jan 28 12:38:17 crc kubenswrapper[4685]: I0128 12:38:17.721265 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9444ee44-142c-4fad-a9c3-9ab30638416d-bundle" (OuterVolumeSpecName: "bundle") pod "9444ee44-142c-4fad-a9c3-9ab30638416d" (UID: "9444ee44-142c-4fad-a9c3-9ab30638416d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 12:38:17 crc kubenswrapper[4685]: I0128 12:38:17.725797 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9444ee44-142c-4fad-a9c3-9ab30638416d-kube-api-access-j2vqr" (OuterVolumeSpecName: "kube-api-access-j2vqr") pod "9444ee44-142c-4fad-a9c3-9ab30638416d" (UID: "9444ee44-142c-4fad-a9c3-9ab30638416d"). InnerVolumeSpecName "kube-api-access-j2vqr". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:38:17 crc kubenswrapper[4685]: I0128 12:38:17.821018 4685 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9444ee44-142c-4fad-a9c3-9ab30638416d-util\") on node \"crc\" DevicePath \"\"" Jan 28 12:38:17 crc kubenswrapper[4685]: I0128 12:38:17.821045 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j2vqr\" (UniqueName: \"kubernetes.io/projected/9444ee44-142c-4fad-a9c3-9ab30638416d-kube-api-access-j2vqr\") on node \"crc\" DevicePath \"\"" Jan 28 12:38:17 crc kubenswrapper[4685]: I0128 12:38:17.821055 4685 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9444ee44-142c-4fad-a9c3-9ab30638416d-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:38:18 crc kubenswrapper[4685]: I0128 12:38:18.420911 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p2sq" event={"ID":"843e2c19-08d8-4669-9a4d-0260bd34d3cf","Type":"ContainerStarted","Data":"61004529d97c39d4f9035a7ad02f216d49913d20a8958e0644099ba3a13dd925"} Jan 28 12:38:18 crc kubenswrapper[4685]: I0128 12:38:18.424033 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q" event={"ID":"9444ee44-142c-4fad-a9c3-9ab30638416d","Type":"ContainerDied","Data":"8b916584c2000207ba40346ab6d29d5fc414187dc101dec09f3c13385e94edf8"} Jan 28 12:38:18 crc kubenswrapper[4685]: I0128 12:38:18.424076 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8b916584c2000207ba40346ab6d29d5fc414187dc101dec09f3c13385e94edf8" Jan 28 12:38:18 crc kubenswrapper[4685]: I0128 12:38:18.424079 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q" Jan 28 12:38:18 crc kubenswrapper[4685]: I0128 12:38:18.442255 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4p2sq" podStartSLOduration=2.991776648 podStartE2EDuration="10.442239809s" podCreationTimestamp="2026-01-28 12:38:08 +0000 UTC" firstStartedPulling="2026-01-28 12:38:10.368994366 +0000 UTC m=+1041.456408191" lastFinishedPulling="2026-01-28 12:38:17.819457527 +0000 UTC m=+1048.906871352" observedRunningTime="2026-01-28 12:38:18.440896781 +0000 UTC m=+1049.528310616" watchObservedRunningTime="2026-01-28 12:38:18.442239809 +0000 UTC m=+1049.529653644" Jan 28 12:38:19 crc kubenswrapper[4685]: I0128 12:38:19.286595 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4p2sq" Jan 28 12:38:19 crc kubenswrapper[4685]: I0128 12:38:19.286666 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4p2sq" Jan 28 12:38:20 crc kubenswrapper[4685]: I0128 12:38:20.337104 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4p2sq" podUID="843e2c19-08d8-4669-9a4d-0260bd34d3cf" containerName="registry-server" probeResult="failure" output=< Jan 28 12:38:20 crc kubenswrapper[4685]: timeout: failed to connect service ":50051" within 1s Jan 28 12:38:20 crc kubenswrapper[4685]: > Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.601569 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-686c4fd867-8j42k"] Jan 28 12:38:24 crc kubenswrapper[4685]: E0128 12:38:24.601855 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9444ee44-142c-4fad-a9c3-9ab30638416d" containerName="extract" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.601874 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="9444ee44-142c-4fad-a9c3-9ab30638416d" containerName="extract" Jan 28 12:38:24 crc kubenswrapper[4685]: E0128 12:38:24.601897 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9444ee44-142c-4fad-a9c3-9ab30638416d" containerName="pull" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.601904 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="9444ee44-142c-4fad-a9c3-9ab30638416d" containerName="pull" Jan 28 12:38:24 crc kubenswrapper[4685]: E0128 12:38:24.601920 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9444ee44-142c-4fad-a9c3-9ab30638416d" containerName="util" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.601928 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="9444ee44-142c-4fad-a9c3-9ab30638416d" containerName="util" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.602091 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="9444ee44-142c-4fad-a9c3-9ab30638416d" containerName="extract" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.602630 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-686c4fd867-8j42k" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.605227 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.605401 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.605513 4685 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-zddxf" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.605628 4685 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.605807 4685 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.616906 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-686c4fd867-8j42k"] Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.703781 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/180820f2-a4dc-4daf-9e46-f065f20cb559-webhook-cert\") pod \"metallb-operator-controller-manager-686c4fd867-8j42k\" (UID: \"180820f2-a4dc-4daf-9e46-f065f20cb559\") " pod="metallb-system/metallb-operator-controller-manager-686c4fd867-8j42k" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.703864 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qnfh\" (UniqueName: \"kubernetes.io/projected/180820f2-a4dc-4daf-9e46-f065f20cb559-kube-api-access-8qnfh\") pod \"metallb-operator-controller-manager-686c4fd867-8j42k\" (UID: \"180820f2-a4dc-4daf-9e46-f065f20cb559\") " pod="metallb-system/metallb-operator-controller-manager-686c4fd867-8j42k" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.703919 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/180820f2-a4dc-4daf-9e46-f065f20cb559-apiservice-cert\") pod \"metallb-operator-controller-manager-686c4fd867-8j42k\" (UID: \"180820f2-a4dc-4daf-9e46-f065f20cb559\") " pod="metallb-system/metallb-operator-controller-manager-686c4fd867-8j42k" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.804763 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qnfh\" (UniqueName: \"kubernetes.io/projected/180820f2-a4dc-4daf-9e46-f065f20cb559-kube-api-access-8qnfh\") pod \"metallb-operator-controller-manager-686c4fd867-8j42k\" (UID: \"180820f2-a4dc-4daf-9e46-f065f20cb559\") " pod="metallb-system/metallb-operator-controller-manager-686c4fd867-8j42k" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.804848 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/180820f2-a4dc-4daf-9e46-f065f20cb559-apiservice-cert\") pod \"metallb-operator-controller-manager-686c4fd867-8j42k\" (UID: \"180820f2-a4dc-4daf-9e46-f065f20cb559\") " pod="metallb-system/metallb-operator-controller-manager-686c4fd867-8j42k" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.804904 
4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/180820f2-a4dc-4daf-9e46-f065f20cb559-webhook-cert\") pod \"metallb-operator-controller-manager-686c4fd867-8j42k\" (UID: \"180820f2-a4dc-4daf-9e46-f065f20cb559\") " pod="metallb-system/metallb-operator-controller-manager-686c4fd867-8j42k" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.811920 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/180820f2-a4dc-4daf-9e46-f065f20cb559-webhook-cert\") pod \"metallb-operator-controller-manager-686c4fd867-8j42k\" (UID: \"180820f2-a4dc-4daf-9e46-f065f20cb559\") " pod="metallb-system/metallb-operator-controller-manager-686c4fd867-8j42k" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.824651 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qnfh\" (UniqueName: \"kubernetes.io/projected/180820f2-a4dc-4daf-9e46-f065f20cb559-kube-api-access-8qnfh\") pod \"metallb-operator-controller-manager-686c4fd867-8j42k\" (UID: \"180820f2-a4dc-4daf-9e46-f065f20cb559\") " pod="metallb-system/metallb-operator-controller-manager-686c4fd867-8j42k" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.827319 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/180820f2-a4dc-4daf-9e46-f065f20cb559-apiservice-cert\") pod \"metallb-operator-controller-manager-686c4fd867-8j42k\" (UID: \"180820f2-a4dc-4daf-9e46-f065f20cb559\") " pod="metallb-system/metallb-operator-controller-manager-686c4fd867-8j42k" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.925543 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-686c4fd867-8j42k" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.968326 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-d6dcdcfd6-bjcnh"] Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.969104 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-d6dcdcfd6-bjcnh" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.975119 4685 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.975277 4685 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-zbpb7" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.975329 4685 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 28 12:38:24 crc kubenswrapper[4685]: I0128 12:38:24.989466 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-d6dcdcfd6-bjcnh"] Jan 28 12:38:25 crc kubenswrapper[4685]: I0128 12:38:25.009963 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1323c35f-78a9-41e6-971c-0556d1bbdade-apiservice-cert\") pod \"metallb-operator-webhook-server-d6dcdcfd6-bjcnh\" (UID: \"1323c35f-78a9-41e6-971c-0556d1bbdade\") " pod="metallb-system/metallb-operator-webhook-server-d6dcdcfd6-bjcnh" Jan 28 12:38:25 crc kubenswrapper[4685]: I0128 12:38:25.010070 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1323c35f-78a9-41e6-971c-0556d1bbdade-webhook-cert\") pod \"metallb-operator-webhook-server-d6dcdcfd6-bjcnh\" (UID: \"1323c35f-78a9-41e6-971c-0556d1bbdade\") " pod="metallb-system/metallb-operator-webhook-server-d6dcdcfd6-bjcnh" Jan 28 12:38:25 crc kubenswrapper[4685]: I0128 12:38:25.010139 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7v6jq\" (UniqueName: \"kubernetes.io/projected/1323c35f-78a9-41e6-971c-0556d1bbdade-kube-api-access-7v6jq\") pod \"metallb-operator-webhook-server-d6dcdcfd6-bjcnh\" (UID: \"1323c35f-78a9-41e6-971c-0556d1bbdade\") " pod="metallb-system/metallb-operator-webhook-server-d6dcdcfd6-bjcnh" Jan 28 12:38:25 crc kubenswrapper[4685]: I0128 12:38:25.112201 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1323c35f-78a9-41e6-971c-0556d1bbdade-apiservice-cert\") pod \"metallb-operator-webhook-server-d6dcdcfd6-bjcnh\" (UID: \"1323c35f-78a9-41e6-971c-0556d1bbdade\") " pod="metallb-system/metallb-operator-webhook-server-d6dcdcfd6-bjcnh" Jan 28 12:38:25 crc kubenswrapper[4685]: I0128 12:38:25.112600 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1323c35f-78a9-41e6-971c-0556d1bbdade-webhook-cert\") pod \"metallb-operator-webhook-server-d6dcdcfd6-bjcnh\" (UID: \"1323c35f-78a9-41e6-971c-0556d1bbdade\") " pod="metallb-system/metallb-operator-webhook-server-d6dcdcfd6-bjcnh" Jan 28 12:38:25 crc kubenswrapper[4685]: I0128 12:38:25.112640 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7v6jq\" (UniqueName: \"kubernetes.io/projected/1323c35f-78a9-41e6-971c-0556d1bbdade-kube-api-access-7v6jq\") pod \"metallb-operator-webhook-server-d6dcdcfd6-bjcnh\" (UID: \"1323c35f-78a9-41e6-971c-0556d1bbdade\") " pod="metallb-system/metallb-operator-webhook-server-d6dcdcfd6-bjcnh" Jan 28 12:38:25 crc kubenswrapper[4685]: I0128 12:38:25.117789 4685 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1323c35f-78a9-41e6-971c-0556d1bbdade-apiservice-cert\") pod \"metallb-operator-webhook-server-d6dcdcfd6-bjcnh\" (UID: \"1323c35f-78a9-41e6-971c-0556d1bbdade\") " pod="metallb-system/metallb-operator-webhook-server-d6dcdcfd6-bjcnh" Jan 28 12:38:25 crc kubenswrapper[4685]: I0128 12:38:25.117797 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1323c35f-78a9-41e6-971c-0556d1bbdade-webhook-cert\") pod \"metallb-operator-webhook-server-d6dcdcfd6-bjcnh\" (UID: \"1323c35f-78a9-41e6-971c-0556d1bbdade\") " pod="metallb-system/metallb-operator-webhook-server-d6dcdcfd6-bjcnh" Jan 28 12:38:25 crc kubenswrapper[4685]: I0128 12:38:25.141678 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7v6jq\" (UniqueName: \"kubernetes.io/projected/1323c35f-78a9-41e6-971c-0556d1bbdade-kube-api-access-7v6jq\") pod \"metallb-operator-webhook-server-d6dcdcfd6-bjcnh\" (UID: \"1323c35f-78a9-41e6-971c-0556d1bbdade\") " pod="metallb-system/metallb-operator-webhook-server-d6dcdcfd6-bjcnh" Jan 28 12:38:25 crc kubenswrapper[4685]: I0128 12:38:25.302691 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-d6dcdcfd6-bjcnh" Jan 28 12:38:25 crc kubenswrapper[4685]: I0128 12:38:25.415323 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-686c4fd867-8j42k"] Jan 28 12:38:25 crc kubenswrapper[4685]: I0128 12:38:25.482812 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-686c4fd867-8j42k" event={"ID":"180820f2-a4dc-4daf-9e46-f065f20cb559","Type":"ContainerStarted","Data":"ea94f1587b9e9a2a122e8b76b93a70bf35c9fc4ebf00ce7f8a47b02fb9c59563"} Jan 28 12:38:25 crc kubenswrapper[4685]: I0128 12:38:25.527883 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-d6dcdcfd6-bjcnh"] Jan 28 12:38:25 crc kubenswrapper[4685]: W0128 12:38:25.533749 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1323c35f_78a9_41e6_971c_0556d1bbdade.slice/crio-0041be44e5bc1fb8bbe7fbc373dc65a6b99e515c43c05e6720ed75d7c72dc8bd WatchSource:0}: Error finding container 0041be44e5bc1fb8bbe7fbc373dc65a6b99e515c43c05e6720ed75d7c72dc8bd: Status 404 returned error can't find the container with id 0041be44e5bc1fb8bbe7fbc373dc65a6b99e515c43c05e6720ed75d7c72dc8bd Jan 28 12:38:26 crc kubenswrapper[4685]: I0128 12:38:26.489291 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-d6dcdcfd6-bjcnh" event={"ID":"1323c35f-78a9-41e6-971c-0556d1bbdade","Type":"ContainerStarted","Data":"0041be44e5bc1fb8bbe7fbc373dc65a6b99e515c43c05e6720ed75d7c72dc8bd"} Jan 28 12:38:29 crc kubenswrapper[4685]: I0128 12:38:29.387045 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4p2sq" Jan 28 12:38:29 crc kubenswrapper[4685]: I0128 12:38:29.449945 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4p2sq" Jan 28 12:38:30 crc kubenswrapper[4685]: I0128 12:38:30.553561 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-operators-4p2sq"] Jan 28 12:38:30 crc kubenswrapper[4685]: I0128 12:38:30.553845 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4p2sq" podUID="843e2c19-08d8-4669-9a4d-0260bd34d3cf" containerName="registry-server" containerID="cri-o://61004529d97c39d4f9035a7ad02f216d49913d20a8958e0644099ba3a13dd925" gracePeriod=2 Jan 28 12:38:35 crc kubenswrapper[4685]: I0128 12:38:35.041895 4685 generic.go:334] "Generic (PLEG): container finished" podID="843e2c19-08d8-4669-9a4d-0260bd34d3cf" containerID="61004529d97c39d4f9035a7ad02f216d49913d20a8958e0644099ba3a13dd925" exitCode=0 Jan 28 12:38:35 crc kubenswrapper[4685]: I0128 12:38:35.041992 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p2sq" event={"ID":"843e2c19-08d8-4669-9a4d-0260bd34d3cf","Type":"ContainerDied","Data":"61004529d97c39d4f9035a7ad02f216d49913d20a8958e0644099ba3a13dd925"} Jan 28 12:38:35 crc kubenswrapper[4685]: I0128 12:38:35.869914 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4p2sq" Jan 28 12:38:35 crc kubenswrapper[4685]: I0128 12:38:35.961218 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s2srs\" (UniqueName: \"kubernetes.io/projected/843e2c19-08d8-4669-9a4d-0260bd34d3cf-kube-api-access-s2srs\") pod \"843e2c19-08d8-4669-9a4d-0260bd34d3cf\" (UID: \"843e2c19-08d8-4669-9a4d-0260bd34d3cf\") " Jan 28 12:38:35 crc kubenswrapper[4685]: I0128 12:38:35.961294 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/843e2c19-08d8-4669-9a4d-0260bd34d3cf-catalog-content\") pod \"843e2c19-08d8-4669-9a4d-0260bd34d3cf\" (UID: \"843e2c19-08d8-4669-9a4d-0260bd34d3cf\") " Jan 28 12:38:35 crc kubenswrapper[4685]: I0128 12:38:35.961337 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/843e2c19-08d8-4669-9a4d-0260bd34d3cf-utilities\") pod \"843e2c19-08d8-4669-9a4d-0260bd34d3cf\" (UID: \"843e2c19-08d8-4669-9a4d-0260bd34d3cf\") " Jan 28 12:38:35 crc kubenswrapper[4685]: I0128 12:38:35.962353 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/843e2c19-08d8-4669-9a4d-0260bd34d3cf-utilities" (OuterVolumeSpecName: "utilities") pod "843e2c19-08d8-4669-9a4d-0260bd34d3cf" (UID: "843e2c19-08d8-4669-9a4d-0260bd34d3cf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:38:35 crc kubenswrapper[4685]: I0128 12:38:35.966982 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/843e2c19-08d8-4669-9a4d-0260bd34d3cf-kube-api-access-s2srs" (OuterVolumeSpecName: "kube-api-access-s2srs") pod "843e2c19-08d8-4669-9a4d-0260bd34d3cf" (UID: "843e2c19-08d8-4669-9a4d-0260bd34d3cf"). InnerVolumeSpecName "kube-api-access-s2srs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:38:36 crc kubenswrapper[4685]: I0128 12:38:36.055326 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p2sq" event={"ID":"843e2c19-08d8-4669-9a4d-0260bd34d3cf","Type":"ContainerDied","Data":"de35f5e5cbf2f86561e919f1cc69880aa8eb23e8f1628428a8c4b2ab55738b5e"} Jan 28 12:38:36 crc kubenswrapper[4685]: I0128 12:38:36.055840 4685 scope.go:117] "RemoveContainer" containerID="61004529d97c39d4f9035a7ad02f216d49913d20a8958e0644099ba3a13dd925" Jan 28 12:38:36 crc kubenswrapper[4685]: I0128 12:38:36.055783 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4p2sq" Jan 28 12:38:36 crc kubenswrapper[4685]: I0128 12:38:36.064406 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s2srs\" (UniqueName: \"kubernetes.io/projected/843e2c19-08d8-4669-9a4d-0260bd34d3cf-kube-api-access-s2srs\") on node \"crc\" DevicePath \"\"" Jan 28 12:38:36 crc kubenswrapper[4685]: I0128 12:38:36.064455 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/843e2c19-08d8-4669-9a4d-0260bd34d3cf-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:38:36 crc kubenswrapper[4685]: I0128 12:38:36.079946 4685 scope.go:117] "RemoveContainer" containerID="6688582f5831c506d7099bba9fff9ce7441a9b06f866bd0659e6c917a2cd95b6" Jan 28 12:38:36 crc kubenswrapper[4685]: I0128 12:38:36.090385 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/843e2c19-08d8-4669-9a4d-0260bd34d3cf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "843e2c19-08d8-4669-9a4d-0260bd34d3cf" (UID: "843e2c19-08d8-4669-9a4d-0260bd34d3cf"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:38:36 crc kubenswrapper[4685]: I0128 12:38:36.098316 4685 scope.go:117] "RemoveContainer" containerID="6b81a9fc75bd130903d64a640180c7505f380bd506837705473667b708dd925d" Jan 28 12:38:36 crc kubenswrapper[4685]: I0128 12:38:36.166005 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/843e2c19-08d8-4669-9a4d-0260bd34d3cf-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:38:36 crc kubenswrapper[4685]: I0128 12:38:36.383522 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4p2sq"] Jan 28 12:38:36 crc kubenswrapper[4685]: I0128 12:38:36.391294 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4p2sq"] Jan 28 12:38:36 crc kubenswrapper[4685]: I0128 12:38:36.552414 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="843e2c19-08d8-4669-9a4d-0260bd34d3cf" path="/var/lib/kubelet/pods/843e2c19-08d8-4669-9a4d-0260bd34d3cf/volumes" Jan 28 12:38:47 crc kubenswrapper[4685]: I0128 12:38:47.111732 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-d6dcdcfd6-bjcnh" event={"ID":"1323c35f-78a9-41e6-971c-0556d1bbdade","Type":"ContainerStarted","Data":"93cdb41f7a2b35d84756d1e7d1d33fcd55058b5dc0060701132a36ce84c5029e"} Jan 28 12:38:47 crc kubenswrapper[4685]: I0128 12:38:47.112303 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-d6dcdcfd6-bjcnh" Jan 28 12:38:47 crc kubenswrapper[4685]: I0128 12:38:47.113916 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-686c4fd867-8j42k" event={"ID":"180820f2-a4dc-4daf-9e46-f065f20cb559","Type":"ContainerStarted","Data":"bb1307bafcf340b1cf26285092281aa43415fe83ec84b46bce5e9cbe64cf5e74"} Jan 28 12:38:47 crc kubenswrapper[4685]: I0128 12:38:47.114090 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-686c4fd867-8j42k" Jan 28 12:38:47 crc kubenswrapper[4685]: I0128 12:38:47.144718 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-d6dcdcfd6-bjcnh" podStartSLOduration=3.349652227 podStartE2EDuration="23.144694957s" podCreationTimestamp="2026-01-28 12:38:24 +0000 UTC" firstStartedPulling="2026-01-28 12:38:25.536838526 +0000 UTC m=+1056.624252361" lastFinishedPulling="2026-01-28 12:38:45.331881246 +0000 UTC m=+1076.419295091" observedRunningTime="2026-01-28 12:38:47.138966475 +0000 UTC m=+1078.226380360" watchObservedRunningTime="2026-01-28 12:38:47.144694957 +0000 UTC m=+1078.232108812" Jan 28 12:38:47 crc kubenswrapper[4685]: I0128 12:38:47.172023 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-686c4fd867-8j42k" podStartSLOduration=3.304923138 podStartE2EDuration="23.172001633s" podCreationTimestamp="2026-01-28 12:38:24 +0000 UTC" firstStartedPulling="2026-01-28 12:38:25.427470751 +0000 UTC m=+1056.514884586" lastFinishedPulling="2026-01-28 12:38:45.294549206 +0000 UTC m=+1076.381963081" observedRunningTime="2026-01-28 12:38:47.167043772 +0000 UTC m=+1078.254457637" watchObservedRunningTime="2026-01-28 12:38:47.172001633 +0000 UTC m=+1078.259415488" Jan 28 12:39:05 crc kubenswrapper[4685]: I0128 12:39:05.307345 
4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-d6dcdcfd6-bjcnh" Jan 28 12:39:24 crc kubenswrapper[4685]: I0128 12:39:24.928054 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-686c4fd867-8j42k" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.703514 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-lfz8d"] Jan 28 12:39:25 crc kubenswrapper[4685]: E0128 12:39:25.703725 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="843e2c19-08d8-4669-9a4d-0260bd34d3cf" containerName="extract-utilities" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.703739 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="843e2c19-08d8-4669-9a4d-0260bd34d3cf" containerName="extract-utilities" Jan 28 12:39:25 crc kubenswrapper[4685]: E0128 12:39:25.703747 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="843e2c19-08d8-4669-9a4d-0260bd34d3cf" containerName="registry-server" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.703755 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="843e2c19-08d8-4669-9a4d-0260bd34d3cf" containerName="registry-server" Jan 28 12:39:25 crc kubenswrapper[4685]: E0128 12:39:25.703764 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="843e2c19-08d8-4669-9a4d-0260bd34d3cf" containerName="extract-content" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.703770 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="843e2c19-08d8-4669-9a4d-0260bd34d3cf" containerName="extract-content" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.703882 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="843e2c19-08d8-4669-9a4d-0260bd34d3cf" containerName="registry-server" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.705619 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.707959 4685 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.708078 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-tr9p9"] Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.708326 4685 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-6m27t" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.708779 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-tr9p9" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.709481 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.710469 4685 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.737445 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-tr9p9"] Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.784261 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-l6dv2"] Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.785154 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-l6dv2" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.786825 4685 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-6h4k9" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.787474 4685 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.793215 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.793324 4685 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.801413 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-qpvfn"] Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.802248 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-qpvfn" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.804538 4685 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.815386 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-qpvfn"] Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.895733 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-metrics-certs\") pod \"speaker-l6dv2\" (UID: \"0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55\") " pod="metallb-system/speaker-l6dv2" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.895781 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-metallb-excludel2\") pod \"speaker-l6dv2\" (UID: \"0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55\") " pod="metallb-system/speaker-l6dv2" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.895806 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kljgw\" (UniqueName: \"kubernetes.io/projected/42db1da2-22b1-4020-92ee-e29273e09efa-kube-api-access-kljgw\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.895826 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42db1da2-22b1-4020-92ee-e29273e09efa-metrics-certs\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.895870 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bkjnn\" (UniqueName: \"kubernetes.io/projected/5f6bbfce-281d-41ee-8911-98fd13d2cdf7-kube-api-access-bkjnn\") pod \"frr-k8s-webhook-server-7df86c4f6c-tr9p9\" (UID: \"5f6bbfce-281d-41ee-8911-98fd13d2cdf7\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-tr9p9" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.896257 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-memberlist\") pod \"speaker-l6dv2\" (UID: \"0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55\") " pod="metallb-system/speaker-l6dv2" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.896817 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5f6bbfce-281d-41ee-8911-98fd13d2cdf7-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-tr9p9\" (UID: \"5f6bbfce-281d-41ee-8911-98fd13d2cdf7\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-tr9p9" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.896904 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/42db1da2-22b1-4020-92ee-e29273e09efa-metrics\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " 
pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.896985 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/42db1da2-22b1-4020-92ee-e29273e09efa-reloader\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.897055 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/42db1da2-22b1-4020-92ee-e29273e09efa-frr-conf\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.897135 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/42db1da2-22b1-4020-92ee-e29273e09efa-frr-startup\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.897235 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdwb2\" (UniqueName: \"kubernetes.io/projected/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-kube-api-access-mdwb2\") pod \"speaker-l6dv2\" (UID: \"0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55\") " pod="metallb-system/speaker-l6dv2" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.897325 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/42db1da2-22b1-4020-92ee-e29273e09efa-frr-sockets\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.997756 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-metrics-certs\") pod \"speaker-l6dv2\" (UID: \"0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55\") " pod="metallb-system/speaker-l6dv2" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.998757 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-metallb-excludel2\") pod \"speaker-l6dv2\" (UID: \"0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55\") " pod="metallb-system/speaker-l6dv2" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.998800 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42db1da2-22b1-4020-92ee-e29273e09efa-metrics-certs\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.998823 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kljgw\" (UniqueName: \"kubernetes.io/projected/42db1da2-22b1-4020-92ee-e29273e09efa-kube-api-access-kljgw\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.998852 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-bkjnn\" (UniqueName: \"kubernetes.io/projected/5f6bbfce-281d-41ee-8911-98fd13d2cdf7-kube-api-access-bkjnn\") pod \"frr-k8s-webhook-server-7df86c4f6c-tr9p9\" (UID: \"5f6bbfce-281d-41ee-8911-98fd13d2cdf7\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-tr9p9" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.998884 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6d6j\" (UniqueName: \"kubernetes.io/projected/e03e4d40-b0e2-41c8-aecc-2386d96fff4c-kube-api-access-p6d6j\") pod \"controller-6968d8fdc4-qpvfn\" (UID: \"e03e4d40-b0e2-41c8-aecc-2386d96fff4c\") " pod="metallb-system/controller-6968d8fdc4-qpvfn" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.998920 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-memberlist\") pod \"speaker-l6dv2\" (UID: \"0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55\") " pod="metallb-system/speaker-l6dv2" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.998955 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5f6bbfce-281d-41ee-8911-98fd13d2cdf7-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-tr9p9\" (UID: \"5f6bbfce-281d-41ee-8911-98fd13d2cdf7\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-tr9p9" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.998979 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/42db1da2-22b1-4020-92ee-e29273e09efa-metrics\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.999003 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e03e4d40-b0e2-41c8-aecc-2386d96fff4c-metrics-certs\") pod \"controller-6968d8fdc4-qpvfn\" (UID: \"e03e4d40-b0e2-41c8-aecc-2386d96fff4c\") " pod="metallb-system/controller-6968d8fdc4-qpvfn" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.999027 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/42db1da2-22b1-4020-92ee-e29273e09efa-reloader\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.999046 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/42db1da2-22b1-4020-92ee-e29273e09efa-frr-conf\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.999072 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/42db1da2-22b1-4020-92ee-e29273e09efa-frr-startup\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.999100 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdwb2\" (UniqueName: \"kubernetes.io/projected/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-kube-api-access-mdwb2\") pod 
\"speaker-l6dv2\" (UID: \"0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55\") " pod="metallb-system/speaker-l6dv2" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.999292 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/42db1da2-22b1-4020-92ee-e29273e09efa-frr-sockets\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.999328 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e03e4d40-b0e2-41c8-aecc-2386d96fff4c-cert\") pod \"controller-6968d8fdc4-qpvfn\" (UID: \"e03e4d40-b0e2-41c8-aecc-2386d96fff4c\") " pod="metallb-system/controller-6968d8fdc4-qpvfn" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.999514 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/42db1da2-22b1-4020-92ee-e29273e09efa-frr-conf\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.999607 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/42db1da2-22b1-4020-92ee-e29273e09efa-metrics\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:25 crc kubenswrapper[4685]: I0128 12:39:25.999699 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/42db1da2-22b1-4020-92ee-e29273e09efa-reloader\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:25 crc kubenswrapper[4685]: E0128 12:39:25.999943 4685 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 28 12:39:26 crc kubenswrapper[4685]: E0128 12:39:26.000005 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-memberlist podName:0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55 nodeName:}" failed. No retries permitted until 2026-01-28 12:39:26.499988234 +0000 UTC m=+1117.587402269 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-memberlist") pod "speaker-l6dv2" (UID: "0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55") : secret "metallb-memberlist" not found Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.000209 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-metallb-excludel2\") pod \"speaker-l6dv2\" (UID: \"0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55\") " pod="metallb-system/speaker-l6dv2" Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.000230 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/42db1da2-22b1-4020-92ee-e29273e09efa-frr-sockets\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.000519 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/42db1da2-22b1-4020-92ee-e29273e09efa-frr-startup\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.003768 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5f6bbfce-281d-41ee-8911-98fd13d2cdf7-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-tr9p9\" (UID: \"5f6bbfce-281d-41ee-8911-98fd13d2cdf7\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-tr9p9" Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.003774 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-metrics-certs\") pod \"speaker-l6dv2\" (UID: \"0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55\") " pod="metallb-system/speaker-l6dv2" Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.009035 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42db1da2-22b1-4020-92ee-e29273e09efa-metrics-certs\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.016579 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdwb2\" (UniqueName: \"kubernetes.io/projected/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-kube-api-access-mdwb2\") pod \"speaker-l6dv2\" (UID: \"0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55\") " pod="metallb-system/speaker-l6dv2" Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.017766 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kljgw\" (UniqueName: \"kubernetes.io/projected/42db1da2-22b1-4020-92ee-e29273e09efa-kube-api-access-kljgw\") pod \"frr-k8s-lfz8d\" (UID: \"42db1da2-22b1-4020-92ee-e29273e09efa\") " pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.023346 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.031198 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bkjnn\" (UniqueName: \"kubernetes.io/projected/5f6bbfce-281d-41ee-8911-98fd13d2cdf7-kube-api-access-bkjnn\") pod \"frr-k8s-webhook-server-7df86c4f6c-tr9p9\" (UID: \"5f6bbfce-281d-41ee-8911-98fd13d2cdf7\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-tr9p9" Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.033491 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-tr9p9" Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.101888 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6d6j\" (UniqueName: \"kubernetes.io/projected/e03e4d40-b0e2-41c8-aecc-2386d96fff4c-kube-api-access-p6d6j\") pod \"controller-6968d8fdc4-qpvfn\" (UID: \"e03e4d40-b0e2-41c8-aecc-2386d96fff4c\") " pod="metallb-system/controller-6968d8fdc4-qpvfn" Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.101989 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e03e4d40-b0e2-41c8-aecc-2386d96fff4c-metrics-certs\") pod \"controller-6968d8fdc4-qpvfn\" (UID: \"e03e4d40-b0e2-41c8-aecc-2386d96fff4c\") " pod="metallb-system/controller-6968d8fdc4-qpvfn" Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.102030 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e03e4d40-b0e2-41c8-aecc-2386d96fff4c-cert\") pod \"controller-6968d8fdc4-qpvfn\" (UID: \"e03e4d40-b0e2-41c8-aecc-2386d96fff4c\") " pod="metallb-system/controller-6968d8fdc4-qpvfn" Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.106328 4685 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.107213 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e03e4d40-b0e2-41c8-aecc-2386d96fff4c-metrics-certs\") pod \"controller-6968d8fdc4-qpvfn\" (UID: \"e03e4d40-b0e2-41c8-aecc-2386d96fff4c\") " pod="metallb-system/controller-6968d8fdc4-qpvfn" Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.116114 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e03e4d40-b0e2-41c8-aecc-2386d96fff4c-cert\") pod \"controller-6968d8fdc4-qpvfn\" (UID: \"e03e4d40-b0e2-41c8-aecc-2386d96fff4c\") " pod="metallb-system/controller-6968d8fdc4-qpvfn" Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.120657 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6d6j\" (UniqueName: \"kubernetes.io/projected/e03e4d40-b0e2-41c8-aecc-2386d96fff4c-kube-api-access-p6d6j\") pod \"controller-6968d8fdc4-qpvfn\" (UID: \"e03e4d40-b0e2-41c8-aecc-2386d96fff4c\") " pod="metallb-system/controller-6968d8fdc4-qpvfn" Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.420240 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-qpvfn" Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.505957 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-memberlist\") pod \"speaker-l6dv2\" (UID: \"0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55\") " pod="metallb-system/speaker-l6dv2" Jan 28 12:39:26 crc kubenswrapper[4685]: E0128 12:39:26.506198 4685 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 28 12:39:26 crc kubenswrapper[4685]: E0128 12:39:26.506283 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-memberlist podName:0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55 nodeName:}" failed. No retries permitted until 2026-01-28 12:39:27.506261929 +0000 UTC m=+1118.593675764 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-memberlist") pod "speaker-l6dv2" (UID: "0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55") : secret "metallb-memberlist" not found Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.510235 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-tr9p9"] Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.612598 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-qpvfn"] Jan 28 12:39:26 crc kubenswrapper[4685]: W0128 12:39:26.619978 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode03e4d40_b0e2_41c8_aecc_2386d96fff4c.slice/crio-b1fc9ee6456b65bf9d28a23800abb7f4403c860edd890648131242da0cb702ef WatchSource:0}: Error finding container b1fc9ee6456b65bf9d28a23800abb7f4403c860edd890648131242da0cb702ef: Status 404 returned error can't find the container with id b1fc9ee6456b65bf9d28a23800abb7f4403c860edd890648131242da0cb702ef Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.703680 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-qpvfn" event={"ID":"e03e4d40-b0e2-41c8-aecc-2386d96fff4c","Type":"ContainerStarted","Data":"b1fc9ee6456b65bf9d28a23800abb7f4403c860edd890648131242da0cb702ef"} Jan 28 12:39:26 crc kubenswrapper[4685]: I0128 12:39:26.704448 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-tr9p9" event={"ID":"5f6bbfce-281d-41ee-8911-98fd13d2cdf7","Type":"ContainerStarted","Data":"841383b8cad0e9d1b43abafcfd0a65b4a3e24c3632f4c29b92a903f99f7b1e93"} Jan 28 12:39:27 crc kubenswrapper[4685]: I0128 12:39:27.517084 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-memberlist\") pod \"speaker-l6dv2\" (UID: \"0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55\") " pod="metallb-system/speaker-l6dv2" Jan 28 12:39:27 crc kubenswrapper[4685]: E0128 12:39:27.517289 4685 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 28 12:39:27 crc kubenswrapper[4685]: E0128 12:39:27.517642 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-memberlist podName:0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55 nodeName:}" failed. 
No retries permitted until 2026-01-28 12:39:29.517618495 +0000 UTC m=+1120.605032330 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-memberlist") pod "speaker-l6dv2" (UID: "0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55") : secret "metallb-memberlist" not found Jan 28 12:39:27 crc kubenswrapper[4685]: I0128 12:39:27.710621 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-qpvfn" event={"ID":"e03e4d40-b0e2-41c8-aecc-2386d96fff4c","Type":"ContainerStarted","Data":"5d22fbafd932c81f614a9e9f077f6f4e8b0be8951dc0a326be4ecf73b9b09214"} Jan 28 12:39:27 crc kubenswrapper[4685]: I0128 12:39:27.711678 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lfz8d" event={"ID":"42db1da2-22b1-4020-92ee-e29273e09efa","Type":"ContainerStarted","Data":"f14b0e8af0f7058b298d13c2d1d1c61b745d234a596114aaffab79ff11eec783"} Jan 28 12:39:29 crc kubenswrapper[4685]: I0128 12:39:29.545112 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-memberlist\") pod \"speaker-l6dv2\" (UID: \"0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55\") " pod="metallb-system/speaker-l6dv2" Jan 28 12:39:29 crc kubenswrapper[4685]: I0128 12:39:29.557094 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55-memberlist\") pod \"speaker-l6dv2\" (UID: \"0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55\") " pod="metallb-system/speaker-l6dv2" Jan 28 12:39:29 crc kubenswrapper[4685]: I0128 12:39:29.712353 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-l6dv2" Jan 28 12:39:29 crc kubenswrapper[4685]: W0128 12:39:29.761796 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0c9dfbc9_b8f0_4ba7_8ffa_72f9349c5d55.slice/crio-72b8a654b2934925c7fc1171d7651234f01be21ef0520130b5249d7b7247ac02 WatchSource:0}: Error finding container 72b8a654b2934925c7fc1171d7651234f01be21ef0520130b5249d7b7247ac02: Status 404 returned error can't find the container with id 72b8a654b2934925c7fc1171d7651234f01be21ef0520130b5249d7b7247ac02 Jan 28 12:39:30 crc kubenswrapper[4685]: I0128 12:39:30.736775 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-l6dv2" event={"ID":"0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55","Type":"ContainerStarted","Data":"ba4df077cb4348dc4696063357f292157c2528f055953ec74ab469da535d532d"} Jan 28 12:39:30 crc kubenswrapper[4685]: I0128 12:39:30.737094 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-l6dv2" event={"ID":"0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55","Type":"ContainerStarted","Data":"72b8a654b2934925c7fc1171d7651234f01be21ef0520130b5249d7b7247ac02"} Jan 28 12:39:37 crc kubenswrapper[4685]: I0128 12:39:37.782974 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-qpvfn" event={"ID":"e03e4d40-b0e2-41c8-aecc-2386d96fff4c","Type":"ContainerStarted","Data":"66884939e1343da4fafb8794aaa2c2c31ad65873211cd34ca6f316e46f16f036"} Jan 28 12:39:37 crc kubenswrapper[4685]: I0128 12:39:37.783648 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-qpvfn" Jan 28 12:39:37 crc kubenswrapper[4685]: I0128 12:39:37.785922 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-l6dv2" event={"ID":"0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55","Type":"ContainerStarted","Data":"bc59e73eba7319ab1200c9532535824565a28ce640a13fabe69d5fff6307ec35"} Jan 28 12:39:37 crc kubenswrapper[4685]: I0128 12:39:37.786048 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-l6dv2" Jan 28 12:39:37 crc kubenswrapper[4685]: I0128 12:39:37.787039 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-qpvfn" Jan 28 12:39:37 crc kubenswrapper[4685]: I0128 12:39:37.788157 4685 generic.go:334] "Generic (PLEG): container finished" podID="42db1da2-22b1-4020-92ee-e29273e09efa" containerID="5a91b88c3e1e8e6493793f46161e60ea8b62b50c37a2f8aae9e4b7756c2f5f6c" exitCode=0 Jan 28 12:39:37 crc kubenswrapper[4685]: I0128 12:39:37.788268 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lfz8d" event={"ID":"42db1da2-22b1-4020-92ee-e29273e09efa","Type":"ContainerDied","Data":"5a91b88c3e1e8e6493793f46161e60ea8b62b50c37a2f8aae9e4b7756c2f5f6c"} Jan 28 12:39:37 crc kubenswrapper[4685]: I0128 12:39:37.790605 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-tr9p9" event={"ID":"5f6bbfce-281d-41ee-8911-98fd13d2cdf7","Type":"ContainerStarted","Data":"7c14bfcd94bb9b332d024168207ee36962bdb77146491ef41aa0810da9c28ab2"} Jan 28 12:39:37 crc kubenswrapper[4685]: I0128 12:39:37.791322 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-tr9p9" Jan 28 12:39:37 crc kubenswrapper[4685]: I0128 12:39:37.806256 4685 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-qpvfn" podStartSLOduration=3.599123282 podStartE2EDuration="12.806220499s" podCreationTimestamp="2026-01-28 12:39:25 +0000 UTC" firstStartedPulling="2026-01-28 12:39:27.520582399 +0000 UTC m=+1118.607996234" lastFinishedPulling="2026-01-28 12:39:36.727679616 +0000 UTC m=+1127.815093451" observedRunningTime="2026-01-28 12:39:37.80026644 +0000 UTC m=+1128.887680315" watchObservedRunningTime="2026-01-28 12:39:37.806220499 +0000 UTC m=+1128.893634344" Jan 28 12:39:37 crc kubenswrapper[4685]: I0128 12:39:37.858891 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-tr9p9" podStartSLOduration=2.651201838 podStartE2EDuration="12.858871944s" podCreationTimestamp="2026-01-28 12:39:25 +0000 UTC" firstStartedPulling="2026-01-28 12:39:26.519393662 +0000 UTC m=+1117.606807497" lastFinishedPulling="2026-01-28 12:39:36.727063768 +0000 UTC m=+1127.814477603" observedRunningTime="2026-01-28 12:39:37.855122217 +0000 UTC m=+1128.942536072" watchObservedRunningTime="2026-01-28 12:39:37.858871944 +0000 UTC m=+1128.946285789" Jan 28 12:39:37 crc kubenswrapper[4685]: I0128 12:39:37.882423 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-l6dv2" podStartSLOduration=6.936911812 podStartE2EDuration="12.882391002s" podCreationTimestamp="2026-01-28 12:39:25 +0000 UTC" firstStartedPulling="2026-01-28 12:39:31.00919119 +0000 UTC m=+1122.096605025" lastFinishedPulling="2026-01-28 12:39:36.95467038 +0000 UTC m=+1128.042084215" observedRunningTime="2026-01-28 12:39:37.875571298 +0000 UTC m=+1128.962985133" watchObservedRunningTime="2026-01-28 12:39:37.882391002 +0000 UTC m=+1128.969804837" Jan 28 12:39:38 crc kubenswrapper[4685]: I0128 12:39:38.797326 4685 generic.go:334] "Generic (PLEG): container finished" podID="42db1da2-22b1-4020-92ee-e29273e09efa" containerID="2db9ecba393e3eb474e3b2be4544c6a93e442c9289ac0cbcfc6bb137407993d6" exitCode=0 Jan 28 12:39:38 crc kubenswrapper[4685]: I0128 12:39:38.797439 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lfz8d" event={"ID":"42db1da2-22b1-4020-92ee-e29273e09efa","Type":"ContainerDied","Data":"2db9ecba393e3eb474e3b2be4544c6a93e442c9289ac0cbcfc6bb137407993d6"} Jan 28 12:39:39 crc kubenswrapper[4685]: I0128 12:39:39.805856 4685 generic.go:334] "Generic (PLEG): container finished" podID="42db1da2-22b1-4020-92ee-e29273e09efa" containerID="50db564e067f5c5838fd78bb2cd92fb905686c0cfe28a18aff046c17aadf5077" exitCode=0 Jan 28 12:39:39 crc kubenswrapper[4685]: I0128 12:39:39.805921 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lfz8d" event={"ID":"42db1da2-22b1-4020-92ee-e29273e09efa","Type":"ContainerDied","Data":"50db564e067f5c5838fd78bb2cd92fb905686c0cfe28a18aff046c17aadf5077"} Jan 28 12:39:40 crc kubenswrapper[4685]: I0128 12:39:40.814324 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lfz8d" event={"ID":"42db1da2-22b1-4020-92ee-e29273e09efa","Type":"ContainerStarted","Data":"a416ca8f4345bf1c0d5a0c35cb0ad1bc6260854d8f0f476ffcd7a1250c9cab51"} Jan 28 12:39:41 crc kubenswrapper[4685]: I0128 12:39:41.192101 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fw6nv"] Jan 28 12:39:41 crc kubenswrapper[4685]: I0128 12:39:41.193610 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fw6nv" Jan 28 12:39:41 crc kubenswrapper[4685]: I0128 12:39:41.201733 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fw6nv"] Jan 28 12:39:41 crc kubenswrapper[4685]: I0128 12:39:41.340237 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hw5z\" (UniqueName: \"kubernetes.io/projected/c394761c-3584-4fe7-9665-e0d78f41b3a7-kube-api-access-2hw5z\") pod \"redhat-marketplace-fw6nv\" (UID: \"c394761c-3584-4fe7-9665-e0d78f41b3a7\") " pod="openshift-marketplace/redhat-marketplace-fw6nv" Jan 28 12:39:41 crc kubenswrapper[4685]: I0128 12:39:41.340339 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c394761c-3584-4fe7-9665-e0d78f41b3a7-utilities\") pod \"redhat-marketplace-fw6nv\" (UID: \"c394761c-3584-4fe7-9665-e0d78f41b3a7\") " pod="openshift-marketplace/redhat-marketplace-fw6nv" Jan 28 12:39:41 crc kubenswrapper[4685]: I0128 12:39:41.340433 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c394761c-3584-4fe7-9665-e0d78f41b3a7-catalog-content\") pod \"redhat-marketplace-fw6nv\" (UID: \"c394761c-3584-4fe7-9665-e0d78f41b3a7\") " pod="openshift-marketplace/redhat-marketplace-fw6nv" Jan 28 12:39:41 crc kubenswrapper[4685]: I0128 12:39:41.441912 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2hw5z\" (UniqueName: \"kubernetes.io/projected/c394761c-3584-4fe7-9665-e0d78f41b3a7-kube-api-access-2hw5z\") pod \"redhat-marketplace-fw6nv\" (UID: \"c394761c-3584-4fe7-9665-e0d78f41b3a7\") " pod="openshift-marketplace/redhat-marketplace-fw6nv" Jan 28 12:39:41 crc kubenswrapper[4685]: I0128 12:39:41.441977 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c394761c-3584-4fe7-9665-e0d78f41b3a7-utilities\") pod \"redhat-marketplace-fw6nv\" (UID: \"c394761c-3584-4fe7-9665-e0d78f41b3a7\") " pod="openshift-marketplace/redhat-marketplace-fw6nv" Jan 28 12:39:41 crc kubenswrapper[4685]: I0128 12:39:41.442062 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c394761c-3584-4fe7-9665-e0d78f41b3a7-catalog-content\") pod \"redhat-marketplace-fw6nv\" (UID: \"c394761c-3584-4fe7-9665-e0d78f41b3a7\") " pod="openshift-marketplace/redhat-marketplace-fw6nv" Jan 28 12:39:41 crc kubenswrapper[4685]: I0128 12:39:41.442499 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c394761c-3584-4fe7-9665-e0d78f41b3a7-catalog-content\") pod \"redhat-marketplace-fw6nv\" (UID: \"c394761c-3584-4fe7-9665-e0d78f41b3a7\") " pod="openshift-marketplace/redhat-marketplace-fw6nv" Jan 28 12:39:41 crc kubenswrapper[4685]: I0128 12:39:41.447420 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c394761c-3584-4fe7-9665-e0d78f41b3a7-utilities\") pod \"redhat-marketplace-fw6nv\" (UID: \"c394761c-3584-4fe7-9665-e0d78f41b3a7\") " pod="openshift-marketplace/redhat-marketplace-fw6nv" Jan 28 12:39:41 crc kubenswrapper[4685]: I0128 12:39:41.473852 4685 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-2hw5z\" (UniqueName: \"kubernetes.io/projected/c394761c-3584-4fe7-9665-e0d78f41b3a7-kube-api-access-2hw5z\") pod \"redhat-marketplace-fw6nv\" (UID: \"c394761c-3584-4fe7-9665-e0d78f41b3a7\") " pod="openshift-marketplace/redhat-marketplace-fw6nv" Jan 28 12:39:41 crc kubenswrapper[4685]: I0128 12:39:41.528075 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fw6nv" Jan 28 12:39:41 crc kubenswrapper[4685]: I0128 12:39:41.822722 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lfz8d" event={"ID":"42db1da2-22b1-4020-92ee-e29273e09efa","Type":"ContainerStarted","Data":"ce843f42fc595de8fa7996f4c9ff720e263af3f5a2200605822c95e63a9edcb1"} Jan 28 12:39:41 crc kubenswrapper[4685]: I0128 12:39:41.930801 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fw6nv"] Jan 28 12:39:41 crc kubenswrapper[4685]: W0128 12:39:41.937491 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc394761c_3584_4fe7_9665_e0d78f41b3a7.slice/crio-d7c7e328bd8835347779d2bb9632cf9378967fd31f93645574f5ef811ed2da57 WatchSource:0}: Error finding container d7c7e328bd8835347779d2bb9632cf9378967fd31f93645574f5ef811ed2da57: Status 404 returned error can't find the container with id d7c7e328bd8835347779d2bb9632cf9378967fd31f93645574f5ef811ed2da57 Jan 28 12:39:42 crc kubenswrapper[4685]: I0128 12:39:42.829732 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fw6nv" event={"ID":"c394761c-3584-4fe7-9665-e0d78f41b3a7","Type":"ContainerStarted","Data":"d7c7e328bd8835347779d2bb9632cf9378967fd31f93645574f5ef811ed2da57"} Jan 28 12:39:43 crc kubenswrapper[4685]: I0128 12:39:43.842609 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lfz8d" event={"ID":"42db1da2-22b1-4020-92ee-e29273e09efa","Type":"ContainerStarted","Data":"f6d173ba6e95c9fd1e4313fb6b425d10891f725b09752cdf39d3294c18d3407c"} Jan 28 12:39:44 crc kubenswrapper[4685]: I0128 12:39:44.849880 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fw6nv" event={"ID":"c394761c-3584-4fe7-9665-e0d78f41b3a7","Type":"ContainerStarted","Data":"a786e48620af167f60b2dbbc0c4c9a32908c457d03301fcfdbec54f4c07a4893"} Jan 28 12:39:45 crc kubenswrapper[4685]: I0128 12:39:45.868605 4685 generic.go:334] "Generic (PLEG): container finished" podID="c394761c-3584-4fe7-9665-e0d78f41b3a7" containerID="a786e48620af167f60b2dbbc0c4c9a32908c457d03301fcfdbec54f4c07a4893" exitCode=0 Jan 28 12:39:45 crc kubenswrapper[4685]: I0128 12:39:45.868659 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fw6nv" event={"ID":"c394761c-3584-4fe7-9665-e0d78f41b3a7","Type":"ContainerDied","Data":"a786e48620af167f60b2dbbc0c4c9a32908c457d03301fcfdbec54f4c07a4893"} Jan 28 12:39:46 crc kubenswrapper[4685]: I0128 12:39:46.041164 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-tr9p9" Jan 28 12:39:49 crc kubenswrapper[4685]: I0128 12:39:49.718053 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-l6dv2" Jan 28 12:39:49 crc kubenswrapper[4685]: I0128 12:39:49.903728 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lfz8d" 
event={"ID":"42db1da2-22b1-4020-92ee-e29273e09efa","Type":"ContainerStarted","Data":"8799e2f5b4ee71dfd73d5c74f293ad21ee19f05f102be905ea56fa52c977d04c"} Jan 28 12:39:56 crc kubenswrapper[4685]: I0128 12:39:56.485725 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9ww8x"] Jan 28 12:39:56 crc kubenswrapper[4685]: I0128 12:39:56.487348 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9ww8x" Jan 28 12:39:56 crc kubenswrapper[4685]: I0128 12:39:56.500377 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9ww8x"] Jan 28 12:39:56 crc kubenswrapper[4685]: I0128 12:39:56.604961 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f21418ca-c434-4aec-95f1-6be063d5f927-catalog-content\") pod \"certified-operators-9ww8x\" (UID: \"f21418ca-c434-4aec-95f1-6be063d5f927\") " pod="openshift-marketplace/certified-operators-9ww8x" Jan 28 12:39:56 crc kubenswrapper[4685]: I0128 12:39:56.605309 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f21418ca-c434-4aec-95f1-6be063d5f927-utilities\") pod \"certified-operators-9ww8x\" (UID: \"f21418ca-c434-4aec-95f1-6be063d5f927\") " pod="openshift-marketplace/certified-operators-9ww8x" Jan 28 12:39:56 crc kubenswrapper[4685]: I0128 12:39:56.605340 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6gtl\" (UniqueName: \"kubernetes.io/projected/f21418ca-c434-4aec-95f1-6be063d5f927-kube-api-access-v6gtl\") pod \"certified-operators-9ww8x\" (UID: \"f21418ca-c434-4aec-95f1-6be063d5f927\") " pod="openshift-marketplace/certified-operators-9ww8x" Jan 28 12:39:56 crc kubenswrapper[4685]: I0128 12:39:56.706453 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f21418ca-c434-4aec-95f1-6be063d5f927-catalog-content\") pod \"certified-operators-9ww8x\" (UID: \"f21418ca-c434-4aec-95f1-6be063d5f927\") " pod="openshift-marketplace/certified-operators-9ww8x" Jan 28 12:39:56 crc kubenswrapper[4685]: I0128 12:39:56.706503 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f21418ca-c434-4aec-95f1-6be063d5f927-utilities\") pod \"certified-operators-9ww8x\" (UID: \"f21418ca-c434-4aec-95f1-6be063d5f927\") " pod="openshift-marketplace/certified-operators-9ww8x" Jan 28 12:39:56 crc kubenswrapper[4685]: I0128 12:39:56.706530 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6gtl\" (UniqueName: \"kubernetes.io/projected/f21418ca-c434-4aec-95f1-6be063d5f927-kube-api-access-v6gtl\") pod \"certified-operators-9ww8x\" (UID: \"f21418ca-c434-4aec-95f1-6be063d5f927\") " pod="openshift-marketplace/certified-operators-9ww8x" Jan 28 12:39:56 crc kubenswrapper[4685]: I0128 12:39:56.706965 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f21418ca-c434-4aec-95f1-6be063d5f927-catalog-content\") pod \"certified-operators-9ww8x\" (UID: \"f21418ca-c434-4aec-95f1-6be063d5f927\") " pod="openshift-marketplace/certified-operators-9ww8x" Jan 28 12:39:56 crc 
kubenswrapper[4685]: I0128 12:39:56.706992 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f21418ca-c434-4aec-95f1-6be063d5f927-utilities\") pod \"certified-operators-9ww8x\" (UID: \"f21418ca-c434-4aec-95f1-6be063d5f927\") " pod="openshift-marketplace/certified-operators-9ww8x" Jan 28 12:39:56 crc kubenswrapper[4685]: I0128 12:39:56.728830 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6gtl\" (UniqueName: \"kubernetes.io/projected/f21418ca-c434-4aec-95f1-6be063d5f927-kube-api-access-v6gtl\") pod \"certified-operators-9ww8x\" (UID: \"f21418ca-c434-4aec-95f1-6be063d5f927\") " pod="openshift-marketplace/certified-operators-9ww8x" Jan 28 12:39:56 crc kubenswrapper[4685]: I0128 12:39:56.848317 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9ww8x" Jan 28 12:39:56 crc kubenswrapper[4685]: I0128 12:39:56.961593 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lfz8d" event={"ID":"42db1da2-22b1-4020-92ee-e29273e09efa","Type":"ContainerStarted","Data":"cc8dd980fc7a27d7dbff6d6511ff263d95c90938323cbea99185fd4156007a18"} Jan 28 12:39:57 crc kubenswrapper[4685]: I0128 12:39:57.070644 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:39:57 crc kubenswrapper[4685]: I0128 12:39:57.071005 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:39:57 crc kubenswrapper[4685]: I0128 12:39:57.342490 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9ww8x"] Jan 28 12:39:57 crc kubenswrapper[4685]: I0128 12:39:57.968206 4685 generic.go:334] "Generic (PLEG): container finished" podID="f21418ca-c434-4aec-95f1-6be063d5f927" containerID="4ec6fc1197fe067c9e8ff8bb3d40afae645696305e58b30f7acd290575fffde0" exitCode=0 Jan 28 12:39:57 crc kubenswrapper[4685]: I0128 12:39:57.968319 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9ww8x" event={"ID":"f21418ca-c434-4aec-95f1-6be063d5f927","Type":"ContainerDied","Data":"4ec6fc1197fe067c9e8ff8bb3d40afae645696305e58b30f7acd290575fffde0"} Jan 28 12:39:57 crc kubenswrapper[4685]: I0128 12:39:57.968609 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9ww8x" event={"ID":"f21418ca-c434-4aec-95f1-6be063d5f927","Type":"ContainerStarted","Data":"a22d2d8d46b73134e11dd078490c5abf5b79d7ae570cb8d806f8051bfce4374e"} Jan 28 12:39:57 crc kubenswrapper[4685]: I0128 12:39:57.972937 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lfz8d" event={"ID":"42db1da2-22b1-4020-92ee-e29273e09efa","Type":"ContainerStarted","Data":"ae233831313980df3116b12a28db9e138bfc1743cc642097839b4c210bde988b"} Jan 28 12:39:57 crc kubenswrapper[4685]: I0128 12:39:57.973186 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-lfz8d" Jan 
28 12:39:57 crc kubenswrapper[4685]: I0128 12:39:57.975432 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:39:58 crc kubenswrapper[4685]: I0128 12:39:58.007812 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-lfz8d" podStartSLOduration=23.563931203 podStartE2EDuration="33.007793732s" podCreationTimestamp="2026-01-28 12:39:25 +0000 UTC" firstStartedPulling="2026-01-28 12:39:27.316448893 +0000 UTC m=+1118.403862728" lastFinishedPulling="2026-01-28 12:39:36.760311432 +0000 UTC m=+1127.847725257" observedRunningTime="2026-01-28 12:39:58.002754909 +0000 UTC m=+1149.090168754" watchObservedRunningTime="2026-01-28 12:39:58.007793732 +0000 UTC m=+1149.095207567" Jan 28 12:39:59 crc kubenswrapper[4685]: I0128 12:39:59.990574 4685 generic.go:334] "Generic (PLEG): container finished" podID="c394761c-3584-4fe7-9665-e0d78f41b3a7" containerID="648cc0d3b7244d0503862c12f4b214100c17fe7cbee41af443ce89d878065827" exitCode=0 Jan 28 12:39:59 crc kubenswrapper[4685]: I0128 12:39:59.990650 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fw6nv" event={"ID":"c394761c-3584-4fe7-9665-e0d78f41b3a7","Type":"ContainerDied","Data":"648cc0d3b7244d0503862c12f4b214100c17fe7cbee41af443ce89d878065827"} Jan 28 12:40:00 crc kubenswrapper[4685]: I0128 12:40:00.279281 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-index-r6b6g"] Jan 28 12:40:00 crc kubenswrapper[4685]: I0128 12:40:00.280687 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-r6b6g" Jan 28 12:40:00 crc kubenswrapper[4685]: I0128 12:40:00.283266 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-index-dockercfg-d2hx2" Jan 28 12:40:00 crc kubenswrapper[4685]: I0128 12:40:00.285223 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Jan 28 12:40:00 crc kubenswrapper[4685]: I0128 12:40:00.285223 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Jan 28 12:40:00 crc kubenswrapper[4685]: I0128 12:40:00.286234 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-r6b6g"] Jan 28 12:40:00 crc kubenswrapper[4685]: I0128 12:40:00.350581 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sxklw\" (UniqueName: \"kubernetes.io/projected/5c808be2-bfa1-446b-adc0-1066d556746e-kube-api-access-sxklw\") pod \"mariadb-operator-index-r6b6g\" (UID: \"5c808be2-bfa1-446b-adc0-1066d556746e\") " pod="openstack-operators/mariadb-operator-index-r6b6g" Jan 28 12:40:00 crc kubenswrapper[4685]: I0128 12:40:00.451629 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sxklw\" (UniqueName: \"kubernetes.io/projected/5c808be2-bfa1-446b-adc0-1066d556746e-kube-api-access-sxklw\") pod \"mariadb-operator-index-r6b6g\" (UID: \"5c808be2-bfa1-446b-adc0-1066d556746e\") " pod="openstack-operators/mariadb-operator-index-r6b6g" Jan 28 12:40:00 crc kubenswrapper[4685]: I0128 12:40:00.480531 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sxklw\" (UniqueName: 
\"kubernetes.io/projected/5c808be2-bfa1-446b-adc0-1066d556746e-kube-api-access-sxklw\") pod \"mariadb-operator-index-r6b6g\" (UID: \"5c808be2-bfa1-446b-adc0-1066d556746e\") " pod="openstack-operators/mariadb-operator-index-r6b6g" Jan 28 12:40:00 crc kubenswrapper[4685]: I0128 12:40:00.600246 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-r6b6g" Jan 28 12:40:00 crc kubenswrapper[4685]: I0128 12:40:00.845791 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-r6b6g"] Jan 28 12:40:00 crc kubenswrapper[4685]: W0128 12:40:00.850860 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5c808be2_bfa1_446b_adc0_1066d556746e.slice/crio-4a3d091b53000f3841f1f268a24748968c1e36bd6949bd66c33395ac6b1590da WatchSource:0}: Error finding container 4a3d091b53000f3841f1f268a24748968c1e36bd6949bd66c33395ac6b1590da: Status 404 returned error can't find the container with id 4a3d091b53000f3841f1f268a24748968c1e36bd6949bd66c33395ac6b1590da Jan 28 12:40:00 crc kubenswrapper[4685]: I0128 12:40:00.997247 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-r6b6g" event={"ID":"5c808be2-bfa1-446b-adc0-1066d556746e","Type":"ContainerStarted","Data":"4a3d091b53000f3841f1f268a24748968c1e36bd6949bd66c33395ac6b1590da"} Jan 28 12:40:01 crc kubenswrapper[4685]: I0128 12:40:01.024564 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:40:01 crc kubenswrapper[4685]: I0128 12:40:01.073582 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-lfz8d" Jan 28 12:40:05 crc kubenswrapper[4685]: I0128 12:40:05.024181 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fw6nv" event={"ID":"c394761c-3584-4fe7-9665-e0d78f41b3a7","Type":"ContainerStarted","Data":"18a7a82db748646526bce255cb7fe2f31906767dd51f5661210483f6dc86b71b"} Jan 28 12:40:05 crc kubenswrapper[4685]: I0128 12:40:05.045140 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fw6nv" podStartSLOduration=9.757536486 podStartE2EDuration="24.045124822s" podCreationTimestamp="2026-01-28 12:39:41 +0000 UTC" firstStartedPulling="2026-01-28 12:39:49.904874826 +0000 UTC m=+1140.992288661" lastFinishedPulling="2026-01-28 12:40:04.192463162 +0000 UTC m=+1155.279876997" observedRunningTime="2026-01-28 12:40:05.041374975 +0000 UTC m=+1156.128788810" watchObservedRunningTime="2026-01-28 12:40:05.045124822 +0000 UTC m=+1156.132538657" Jan 28 12:40:10 crc kubenswrapper[4685]: I0128 12:40:10.060870 4685 generic.go:334] "Generic (PLEG): container finished" podID="f21418ca-c434-4aec-95f1-6be063d5f927" containerID="eec0b31c7b49940a9c437af5e0b5667afdaa8a0992c4b43910d50f64f4eb9b9d" exitCode=0 Jan 28 12:40:10 crc kubenswrapper[4685]: I0128 12:40:10.061088 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9ww8x" event={"ID":"f21418ca-c434-4aec-95f1-6be063d5f927","Type":"ContainerDied","Data":"eec0b31c7b49940a9c437af5e0b5667afdaa8a0992c4b43910d50f64f4eb9b9d"} Jan 28 12:40:11 crc kubenswrapper[4685]: I0128 12:40:11.528331 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fw6nv" Jan 28 12:40:11 crc 
kubenswrapper[4685]: I0128 12:40:11.528397 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fw6nv" Jan 28 12:40:11 crc kubenswrapper[4685]: I0128 12:40:11.585115 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fw6nv" Jan 28 12:40:12 crc kubenswrapper[4685]: I0128 12:40:12.117158 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fw6nv" Jan 28 12:40:14 crc kubenswrapper[4685]: I0128 12:40:14.085453 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-r6b6g" event={"ID":"5c808be2-bfa1-446b-adc0-1066d556746e","Type":"ContainerStarted","Data":"e32fa465db61f02773fecd45d1a8f1783017d1bcfd0f5025512586785736346a"} Jan 28 12:40:14 crc kubenswrapper[4685]: I0128 12:40:14.088460 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9ww8x" event={"ID":"f21418ca-c434-4aec-95f1-6be063d5f927","Type":"ContainerStarted","Data":"95ac21ccba78b78b786e0d6c3e33482e3f1bd6a016dee935d43f9699c4a1fb08"} Jan 28 12:40:14 crc kubenswrapper[4685]: I0128 12:40:14.120051 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-index-r6b6g" podStartSLOduration=2.51448993 podStartE2EDuration="14.120033706s" podCreationTimestamp="2026-01-28 12:40:00 +0000 UTC" firstStartedPulling="2026-01-28 12:40:00.853993823 +0000 UTC m=+1151.941407658" lastFinishedPulling="2026-01-28 12:40:12.459537569 +0000 UTC m=+1163.546951434" observedRunningTime="2026-01-28 12:40:14.101204601 +0000 UTC m=+1165.188618436" watchObservedRunningTime="2026-01-28 12:40:14.120033706 +0000 UTC m=+1165.207447541" Jan 28 12:40:14 crc kubenswrapper[4685]: I0128 12:40:14.120661 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9ww8x" podStartSLOduration=3.80814854 podStartE2EDuration="18.120654903s" podCreationTimestamp="2026-01-28 12:39:56 +0000 UTC" firstStartedPulling="2026-01-28 12:39:58.146917842 +0000 UTC m=+1149.234331677" lastFinishedPulling="2026-01-28 12:40:12.459424195 +0000 UTC m=+1163.546838040" observedRunningTime="2026-01-28 12:40:14.118520373 +0000 UTC m=+1165.205934208" watchObservedRunningTime="2026-01-28 12:40:14.120654903 +0000 UTC m=+1165.208068738" Jan 28 12:40:16 crc kubenswrapper[4685]: I0128 12:40:16.262236 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fw6nv"] Jan 28 12:40:16 crc kubenswrapper[4685]: I0128 12:40:16.262763 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fw6nv" podUID="c394761c-3584-4fe7-9665-e0d78f41b3a7" containerName="registry-server" containerID="cri-o://18a7a82db748646526bce255cb7fe2f31906767dd51f5661210483f6dc86b71b" gracePeriod=2 Jan 28 12:40:16 crc kubenswrapper[4685]: I0128 12:40:16.848885 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9ww8x" Jan 28 12:40:16 crc kubenswrapper[4685]: I0128 12:40:16.848946 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9ww8x" Jan 28 12:40:16 crc kubenswrapper[4685]: I0128 12:40:16.889842 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/certified-operators-9ww8x" Jan 28 12:40:17 crc kubenswrapper[4685]: I0128 12:40:17.110519 4685 generic.go:334] "Generic (PLEG): container finished" podID="c394761c-3584-4fe7-9665-e0d78f41b3a7" containerID="18a7a82db748646526bce255cb7fe2f31906767dd51f5661210483f6dc86b71b" exitCode=0 Jan 28 12:40:17 crc kubenswrapper[4685]: I0128 12:40:17.110883 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fw6nv" event={"ID":"c394761c-3584-4fe7-9665-e0d78f41b3a7","Type":"ContainerDied","Data":"18a7a82db748646526bce255cb7fe2f31906767dd51f5661210483f6dc86b71b"} Jan 28 12:40:17 crc kubenswrapper[4685]: I0128 12:40:17.216727 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fw6nv" Jan 28 12:40:17 crc kubenswrapper[4685]: I0128 12:40:17.278272 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c394761c-3584-4fe7-9665-e0d78f41b3a7-utilities\") pod \"c394761c-3584-4fe7-9665-e0d78f41b3a7\" (UID: \"c394761c-3584-4fe7-9665-e0d78f41b3a7\") " Jan 28 12:40:17 crc kubenswrapper[4685]: I0128 12:40:17.278316 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2hw5z\" (UniqueName: \"kubernetes.io/projected/c394761c-3584-4fe7-9665-e0d78f41b3a7-kube-api-access-2hw5z\") pod \"c394761c-3584-4fe7-9665-e0d78f41b3a7\" (UID: \"c394761c-3584-4fe7-9665-e0d78f41b3a7\") " Jan 28 12:40:17 crc kubenswrapper[4685]: I0128 12:40:17.278354 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c394761c-3584-4fe7-9665-e0d78f41b3a7-catalog-content\") pod \"c394761c-3584-4fe7-9665-e0d78f41b3a7\" (UID: \"c394761c-3584-4fe7-9665-e0d78f41b3a7\") " Jan 28 12:40:17 crc kubenswrapper[4685]: I0128 12:40:17.279442 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c394761c-3584-4fe7-9665-e0d78f41b3a7-utilities" (OuterVolumeSpecName: "utilities") pod "c394761c-3584-4fe7-9665-e0d78f41b3a7" (UID: "c394761c-3584-4fe7-9665-e0d78f41b3a7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:40:17 crc kubenswrapper[4685]: I0128 12:40:17.283645 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c394761c-3584-4fe7-9665-e0d78f41b3a7-kube-api-access-2hw5z" (OuterVolumeSpecName: "kube-api-access-2hw5z") pod "c394761c-3584-4fe7-9665-e0d78f41b3a7" (UID: "c394761c-3584-4fe7-9665-e0d78f41b3a7"). InnerVolumeSpecName "kube-api-access-2hw5z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:40:17 crc kubenswrapper[4685]: I0128 12:40:17.298639 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c394761c-3584-4fe7-9665-e0d78f41b3a7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c394761c-3584-4fe7-9665-e0d78f41b3a7" (UID: "c394761c-3584-4fe7-9665-e0d78f41b3a7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:40:17 crc kubenswrapper[4685]: I0128 12:40:17.379567 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c394761c-3584-4fe7-9665-e0d78f41b3a7-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:40:17 crc kubenswrapper[4685]: I0128 12:40:17.379595 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2hw5z\" (UniqueName: \"kubernetes.io/projected/c394761c-3584-4fe7-9665-e0d78f41b3a7-kube-api-access-2hw5z\") on node \"crc\" DevicePath \"\"" Jan 28 12:40:17 crc kubenswrapper[4685]: I0128 12:40:17.379606 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c394761c-3584-4fe7-9665-e0d78f41b3a7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:40:18 crc kubenswrapper[4685]: I0128 12:40:18.120896 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fw6nv" event={"ID":"c394761c-3584-4fe7-9665-e0d78f41b3a7","Type":"ContainerDied","Data":"d7c7e328bd8835347779d2bb9632cf9378967fd31f93645574f5ef811ed2da57"} Jan 28 12:40:18 crc kubenswrapper[4685]: I0128 12:40:18.120941 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fw6nv" Jan 28 12:40:18 crc kubenswrapper[4685]: I0128 12:40:18.121020 4685 scope.go:117] "RemoveContainer" containerID="18a7a82db748646526bce255cb7fe2f31906767dd51f5661210483f6dc86b71b" Jan 28 12:40:18 crc kubenswrapper[4685]: I0128 12:40:18.153042 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fw6nv"] Jan 28 12:40:18 crc kubenswrapper[4685]: I0128 12:40:18.158019 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fw6nv"] Jan 28 12:40:18 crc kubenswrapper[4685]: I0128 12:40:18.165484 4685 scope.go:117] "RemoveContainer" containerID="648cc0d3b7244d0503862c12f4b214100c17fe7cbee41af443ce89d878065827" Jan 28 12:40:18 crc kubenswrapper[4685]: I0128 12:40:18.195052 4685 scope.go:117] "RemoveContainer" containerID="a786e48620af167f60b2dbbc0c4c9a32908c457d03301fcfdbec54f4c07a4893" Jan 28 12:40:18 crc kubenswrapper[4685]: I0128 12:40:18.553195 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c394761c-3584-4fe7-9665-e0d78f41b3a7" path="/var/lib/kubelet/pods/c394761c-3584-4fe7-9665-e0d78f41b3a7/volumes" Jan 28 12:40:20 crc kubenswrapper[4685]: I0128 12:40:20.601071 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/mariadb-operator-index-r6b6g" Jan 28 12:40:20 crc kubenswrapper[4685]: I0128 12:40:20.601123 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-index-r6b6g" Jan 28 12:40:20 crc kubenswrapper[4685]: I0128 12:40:20.631376 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/mariadb-operator-index-r6b6g" Jan 28 12:40:21 crc kubenswrapper[4685]: I0128 12:40:21.174150 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-index-r6b6g" Jan 28 12:40:26 crc kubenswrapper[4685]: I0128 12:40:26.900579 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9ww8x" Jan 28 12:40:26 crc kubenswrapper[4685]: I0128 12:40:26.975973 4685 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9ww8x"] Jan 28 12:40:27 crc kubenswrapper[4685]: I0128 12:40:27.036702 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kh692"] Jan 28 12:40:27 crc kubenswrapper[4685]: I0128 12:40:27.036973 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-kh692" podUID="d5738525-4c41-42c5-85d3-afce7539f7c8" containerName="registry-server" containerID="cri-o://25d9e981176d3b9fe2ef60d850e0f5e5bc7cdf3c10ff362f205a7a18e2017c36" gracePeriod=2 Jan 28 12:40:27 crc kubenswrapper[4685]: I0128 12:40:27.070464 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:40:27 crc kubenswrapper[4685]: I0128 12:40:27.070545 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:40:27 crc kubenswrapper[4685]: E0128 12:40:27.528535 4685 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 25d9e981176d3b9fe2ef60d850e0f5e5bc7cdf3c10ff362f205a7a18e2017c36 is running failed: container process not found" containerID="25d9e981176d3b9fe2ef60d850e0f5e5bc7cdf3c10ff362f205a7a18e2017c36" cmd=["grpc_health_probe","-addr=:50051"] Jan 28 12:40:27 crc kubenswrapper[4685]: E0128 12:40:27.529156 4685 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 25d9e981176d3b9fe2ef60d850e0f5e5bc7cdf3c10ff362f205a7a18e2017c36 is running failed: container process not found" containerID="25d9e981176d3b9fe2ef60d850e0f5e5bc7cdf3c10ff362f205a7a18e2017c36" cmd=["grpc_health_probe","-addr=:50051"] Jan 28 12:40:27 crc kubenswrapper[4685]: E0128 12:40:27.529498 4685 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 25d9e981176d3b9fe2ef60d850e0f5e5bc7cdf3c10ff362f205a7a18e2017c36 is running failed: container process not found" containerID="25d9e981176d3b9fe2ef60d850e0f5e5bc7cdf3c10ff362f205a7a18e2017c36" cmd=["grpc_health_probe","-addr=:50051"] Jan 28 12:40:27 crc kubenswrapper[4685]: E0128 12:40:27.529539 4685 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 25d9e981176d3b9fe2ef60d850e0f5e5bc7cdf3c10ff362f205a7a18e2017c36 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-kh692" podUID="d5738525-4c41-42c5-85d3-afce7539f7c8" containerName="registry-server" Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.062343 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kh692" Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.158394 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5738525-4c41-42c5-85d3-afce7539f7c8-utilities\") pod \"d5738525-4c41-42c5-85d3-afce7539f7c8\" (UID: \"d5738525-4c41-42c5-85d3-afce7539f7c8\") " Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.158555 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5738525-4c41-42c5-85d3-afce7539f7c8-catalog-content\") pod \"d5738525-4c41-42c5-85d3-afce7539f7c8\" (UID: \"d5738525-4c41-42c5-85d3-afce7539f7c8\") " Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.158608 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lnhp2\" (UniqueName: \"kubernetes.io/projected/d5738525-4c41-42c5-85d3-afce7539f7c8-kube-api-access-lnhp2\") pod \"d5738525-4c41-42c5-85d3-afce7539f7c8\" (UID: \"d5738525-4c41-42c5-85d3-afce7539f7c8\") " Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.159283 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5738525-4c41-42c5-85d3-afce7539f7c8-utilities" (OuterVolumeSpecName: "utilities") pod "d5738525-4c41-42c5-85d3-afce7539f7c8" (UID: "d5738525-4c41-42c5-85d3-afce7539f7c8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.168024 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5738525-4c41-42c5-85d3-afce7539f7c8-kube-api-access-lnhp2" (OuterVolumeSpecName: "kube-api-access-lnhp2") pod "d5738525-4c41-42c5-85d3-afce7539f7c8" (UID: "d5738525-4c41-42c5-85d3-afce7539f7c8"). InnerVolumeSpecName "kube-api-access-lnhp2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.187998 4685 generic.go:334] "Generic (PLEG): container finished" podID="d5738525-4c41-42c5-85d3-afce7539f7c8" containerID="25d9e981176d3b9fe2ef60d850e0f5e5bc7cdf3c10ff362f205a7a18e2017c36" exitCode=0 Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.188040 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kh692" event={"ID":"d5738525-4c41-42c5-85d3-afce7539f7c8","Type":"ContainerDied","Data":"25d9e981176d3b9fe2ef60d850e0f5e5bc7cdf3c10ff362f205a7a18e2017c36"} Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.188066 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kh692" event={"ID":"d5738525-4c41-42c5-85d3-afce7539f7c8","Type":"ContainerDied","Data":"e8b2cdfb3944f40b025595831a566b53a999c1f0dcfe1da9eb67e5ce87f91ce9"} Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.188084 4685 scope.go:117] "RemoveContainer" containerID="25d9e981176d3b9fe2ef60d850e0f5e5bc7cdf3c10ff362f205a7a18e2017c36" Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.188206 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kh692" Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.210344 4685 scope.go:117] "RemoveContainer" containerID="fe61c7d2cd6be7ecdfd39bc6e86b64310d8cae8a18559cdd4e6cd3b2251d6890" Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.214148 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5738525-4c41-42c5-85d3-afce7539f7c8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d5738525-4c41-42c5-85d3-afce7539f7c8" (UID: "d5738525-4c41-42c5-85d3-afce7539f7c8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.242010 4685 scope.go:117] "RemoveContainer" containerID="b849569ed623081ccefe47b0be8910320feea45e1ec6038aa1703c08fc24ff41" Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.260092 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5738525-4c41-42c5-85d3-afce7539f7c8-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.260121 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5738525-4c41-42c5-85d3-afce7539f7c8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.260130 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lnhp2\" (UniqueName: \"kubernetes.io/projected/d5738525-4c41-42c5-85d3-afce7539f7c8-kube-api-access-lnhp2\") on node \"crc\" DevicePath \"\"" Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.267326 4685 scope.go:117] "RemoveContainer" containerID="25d9e981176d3b9fe2ef60d850e0f5e5bc7cdf3c10ff362f205a7a18e2017c36" Jan 28 12:40:28 crc kubenswrapper[4685]: E0128 12:40:28.267919 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25d9e981176d3b9fe2ef60d850e0f5e5bc7cdf3c10ff362f205a7a18e2017c36\": container with ID starting with 25d9e981176d3b9fe2ef60d850e0f5e5bc7cdf3c10ff362f205a7a18e2017c36 not found: ID does not exist" containerID="25d9e981176d3b9fe2ef60d850e0f5e5bc7cdf3c10ff362f205a7a18e2017c36" Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.267966 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25d9e981176d3b9fe2ef60d850e0f5e5bc7cdf3c10ff362f205a7a18e2017c36"} err="failed to get container status \"25d9e981176d3b9fe2ef60d850e0f5e5bc7cdf3c10ff362f205a7a18e2017c36\": rpc error: code = NotFound desc = could not find container \"25d9e981176d3b9fe2ef60d850e0f5e5bc7cdf3c10ff362f205a7a18e2017c36\": container with ID starting with 25d9e981176d3b9fe2ef60d850e0f5e5bc7cdf3c10ff362f205a7a18e2017c36 not found: ID does not exist" Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.267998 4685 scope.go:117] "RemoveContainer" containerID="fe61c7d2cd6be7ecdfd39bc6e86b64310d8cae8a18559cdd4e6cd3b2251d6890" Jan 28 12:40:28 crc kubenswrapper[4685]: E0128 12:40:28.268543 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe61c7d2cd6be7ecdfd39bc6e86b64310d8cae8a18559cdd4e6cd3b2251d6890\": container with ID starting with fe61c7d2cd6be7ecdfd39bc6e86b64310d8cae8a18559cdd4e6cd3b2251d6890 not found: ID does not exist" 
containerID="fe61c7d2cd6be7ecdfd39bc6e86b64310d8cae8a18559cdd4e6cd3b2251d6890" Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.268583 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe61c7d2cd6be7ecdfd39bc6e86b64310d8cae8a18559cdd4e6cd3b2251d6890"} err="failed to get container status \"fe61c7d2cd6be7ecdfd39bc6e86b64310d8cae8a18559cdd4e6cd3b2251d6890\": rpc error: code = NotFound desc = could not find container \"fe61c7d2cd6be7ecdfd39bc6e86b64310d8cae8a18559cdd4e6cd3b2251d6890\": container with ID starting with fe61c7d2cd6be7ecdfd39bc6e86b64310d8cae8a18559cdd4e6cd3b2251d6890 not found: ID does not exist" Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.268611 4685 scope.go:117] "RemoveContainer" containerID="b849569ed623081ccefe47b0be8910320feea45e1ec6038aa1703c08fc24ff41" Jan 28 12:40:28 crc kubenswrapper[4685]: E0128 12:40:28.268894 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b849569ed623081ccefe47b0be8910320feea45e1ec6038aa1703c08fc24ff41\": container with ID starting with b849569ed623081ccefe47b0be8910320feea45e1ec6038aa1703c08fc24ff41 not found: ID does not exist" containerID="b849569ed623081ccefe47b0be8910320feea45e1ec6038aa1703c08fc24ff41" Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.268924 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b849569ed623081ccefe47b0be8910320feea45e1ec6038aa1703c08fc24ff41"} err="failed to get container status \"b849569ed623081ccefe47b0be8910320feea45e1ec6038aa1703c08fc24ff41\": rpc error: code = NotFound desc = could not find container \"b849569ed623081ccefe47b0be8910320feea45e1ec6038aa1703c08fc24ff41\": container with ID starting with b849569ed623081ccefe47b0be8910320feea45e1ec6038aa1703c08fc24ff41 not found: ID does not exist" Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.515803 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kh692"] Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.524704 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-kh692"] Jan 28 12:40:28 crc kubenswrapper[4685]: I0128 12:40:28.552476 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5738525-4c41-42c5-85d3-afce7539f7c8" path="/var/lib/kubelet/pods/d5738525-4c41-42c5-85d3-afce7539f7c8/volumes" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.523314 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m"] Jan 28 12:40:30 crc kubenswrapper[4685]: E0128 12:40:30.524818 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c394761c-3584-4fe7-9665-e0d78f41b3a7" containerName="registry-server" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.524842 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="c394761c-3584-4fe7-9665-e0d78f41b3a7" containerName="registry-server" Jan 28 12:40:30 crc kubenswrapper[4685]: E0128 12:40:30.524866 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5738525-4c41-42c5-85d3-afce7539f7c8" containerName="extract-content" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.524875 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5738525-4c41-42c5-85d3-afce7539f7c8" containerName="extract-content" Jan 28 12:40:30 crc kubenswrapper[4685]: E0128 
12:40:30.524898 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c394761c-3584-4fe7-9665-e0d78f41b3a7" containerName="extract-utilities" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.524907 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="c394761c-3584-4fe7-9665-e0d78f41b3a7" containerName="extract-utilities" Jan 28 12:40:30 crc kubenswrapper[4685]: E0128 12:40:30.524938 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c394761c-3584-4fe7-9665-e0d78f41b3a7" containerName="extract-content" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.524947 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="c394761c-3584-4fe7-9665-e0d78f41b3a7" containerName="extract-content" Jan 28 12:40:30 crc kubenswrapper[4685]: E0128 12:40:30.524964 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5738525-4c41-42c5-85d3-afce7539f7c8" containerName="extract-utilities" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.524974 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5738525-4c41-42c5-85d3-afce7539f7c8" containerName="extract-utilities" Jan 28 12:40:30 crc kubenswrapper[4685]: E0128 12:40:30.524997 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5738525-4c41-42c5-85d3-afce7539f7c8" containerName="registry-server" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.525009 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5738525-4c41-42c5-85d3-afce7539f7c8" containerName="registry-server" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.525479 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="c394761c-3584-4fe7-9665-e0d78f41b3a7" containerName="registry-server" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.525513 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5738525-4c41-42c5-85d3-afce7539f7c8" containerName="registry-server" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.527749 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.532353 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-dvgnb" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.543462 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m"] Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.588137 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c588671a-6c05-4991-9345-f9bc8724b0c7-util\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m\" (UID: \"c588671a-6c05-4991-9345-f9bc8724b0c7\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.588296 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z599v\" (UniqueName: \"kubernetes.io/projected/c588671a-6c05-4991-9345-f9bc8724b0c7-kube-api-access-z599v\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m\" (UID: \"c588671a-6c05-4991-9345-f9bc8724b0c7\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.588324 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c588671a-6c05-4991-9345-f9bc8724b0c7-bundle\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m\" (UID: \"c588671a-6c05-4991-9345-f9bc8724b0c7\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.689667 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c588671a-6c05-4991-9345-f9bc8724b0c7-util\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m\" (UID: \"c588671a-6c05-4991-9345-f9bc8724b0c7\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.689752 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z599v\" (UniqueName: \"kubernetes.io/projected/c588671a-6c05-4991-9345-f9bc8724b0c7-kube-api-access-z599v\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m\" (UID: \"c588671a-6c05-4991-9345-f9bc8724b0c7\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.689778 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c588671a-6c05-4991-9345-f9bc8724b0c7-bundle\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m\" (UID: \"c588671a-6c05-4991-9345-f9bc8724b0c7\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.690346 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/c588671a-6c05-4991-9345-f9bc8724b0c7-bundle\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m\" (UID: \"c588671a-6c05-4991-9345-f9bc8724b0c7\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.690636 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c588671a-6c05-4991-9345-f9bc8724b0c7-util\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m\" (UID: \"c588671a-6c05-4991-9345-f9bc8724b0c7\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.707959 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z599v\" (UniqueName: \"kubernetes.io/projected/c588671a-6c05-4991-9345-f9bc8724b0c7-kube-api-access-z599v\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m\" (UID: \"c588671a-6c05-4991-9345-f9bc8724b0c7\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m" Jan 28 12:40:30 crc kubenswrapper[4685]: I0128 12:40:30.850529 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m" Jan 28 12:40:31 crc kubenswrapper[4685]: I0128 12:40:31.311257 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m"] Jan 28 12:40:32 crc kubenswrapper[4685]: I0128 12:40:32.216359 4685 generic.go:334] "Generic (PLEG): container finished" podID="c588671a-6c05-4991-9345-f9bc8724b0c7" containerID="1a739056da70c7bba459d4bc081937bc2ed6f0f1e193e01fe923cb7de54ab321" exitCode=0 Jan 28 12:40:32 crc kubenswrapper[4685]: I0128 12:40:32.216416 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m" event={"ID":"c588671a-6c05-4991-9345-f9bc8724b0c7","Type":"ContainerDied","Data":"1a739056da70c7bba459d4bc081937bc2ed6f0f1e193e01fe923cb7de54ab321"} Jan 28 12:40:32 crc kubenswrapper[4685]: I0128 12:40:32.216452 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m" event={"ID":"c588671a-6c05-4991-9345-f9bc8724b0c7","Type":"ContainerStarted","Data":"6a62144500e8e8af81e3454b46f6d53e16dce655833a8bf15efafe2830d5866c"} Jan 28 12:40:34 crc kubenswrapper[4685]: I0128 12:40:34.235672 4685 generic.go:334] "Generic (PLEG): container finished" podID="c588671a-6c05-4991-9345-f9bc8724b0c7" containerID="4acb1eb9ef220b151ef72ef0eed7e05618503f2f1771f60975e8eb9a7232497e" exitCode=0 Jan 28 12:40:34 crc kubenswrapper[4685]: I0128 12:40:34.235805 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m" event={"ID":"c588671a-6c05-4991-9345-f9bc8724b0c7","Type":"ContainerDied","Data":"4acb1eb9ef220b151ef72ef0eed7e05618503f2f1771f60975e8eb9a7232497e"} Jan 28 12:40:35 crc kubenswrapper[4685]: I0128 12:40:35.246651 4685 generic.go:334] "Generic (PLEG): container finished" podID="c588671a-6c05-4991-9345-f9bc8724b0c7" containerID="f618b3430ffa04b36e8110543e415e5f9bc0750dc9cb07d02a1ff84ac4943810" exitCode=0 Jan 28 12:40:35 crc kubenswrapper[4685]: I0128 12:40:35.246698 4685 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m" event={"ID":"c588671a-6c05-4991-9345-f9bc8724b0c7","Type":"ContainerDied","Data":"f618b3430ffa04b36e8110543e415e5f9bc0750dc9cb07d02a1ff84ac4943810"} Jan 28 12:40:36 crc kubenswrapper[4685]: I0128 12:40:36.503771 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m" Jan 28 12:40:36 crc kubenswrapper[4685]: I0128 12:40:36.562288 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c588671a-6c05-4991-9345-f9bc8724b0c7-util\") pod \"c588671a-6c05-4991-9345-f9bc8724b0c7\" (UID: \"c588671a-6c05-4991-9345-f9bc8724b0c7\") " Jan 28 12:40:36 crc kubenswrapper[4685]: I0128 12:40:36.562336 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c588671a-6c05-4991-9345-f9bc8724b0c7-bundle\") pod \"c588671a-6c05-4991-9345-f9bc8724b0c7\" (UID: \"c588671a-6c05-4991-9345-f9bc8724b0c7\") " Jan 28 12:40:36 crc kubenswrapper[4685]: I0128 12:40:36.562378 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z599v\" (UniqueName: \"kubernetes.io/projected/c588671a-6c05-4991-9345-f9bc8724b0c7-kube-api-access-z599v\") pod \"c588671a-6c05-4991-9345-f9bc8724b0c7\" (UID: \"c588671a-6c05-4991-9345-f9bc8724b0c7\") " Jan 28 12:40:36 crc kubenswrapper[4685]: I0128 12:40:36.563692 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c588671a-6c05-4991-9345-f9bc8724b0c7-bundle" (OuterVolumeSpecName: "bundle") pod "c588671a-6c05-4991-9345-f9bc8724b0c7" (UID: "c588671a-6c05-4991-9345-f9bc8724b0c7"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:40:36 crc kubenswrapper[4685]: I0128 12:40:36.568855 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c588671a-6c05-4991-9345-f9bc8724b0c7-kube-api-access-z599v" (OuterVolumeSpecName: "kube-api-access-z599v") pod "c588671a-6c05-4991-9345-f9bc8724b0c7" (UID: "c588671a-6c05-4991-9345-f9bc8724b0c7"). InnerVolumeSpecName "kube-api-access-z599v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:40:36 crc kubenswrapper[4685]: I0128 12:40:36.580379 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c588671a-6c05-4991-9345-f9bc8724b0c7-util" (OuterVolumeSpecName: "util") pod "c588671a-6c05-4991-9345-f9bc8724b0c7" (UID: "c588671a-6c05-4991-9345-f9bc8724b0c7"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:40:36 crc kubenswrapper[4685]: I0128 12:40:36.663912 4685 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c588671a-6c05-4991-9345-f9bc8724b0c7-util\") on node \"crc\" DevicePath \"\"" Jan 28 12:40:36 crc kubenswrapper[4685]: I0128 12:40:36.663942 4685 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c588671a-6c05-4991-9345-f9bc8724b0c7-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:40:36 crc kubenswrapper[4685]: I0128 12:40:36.663953 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z599v\" (UniqueName: \"kubernetes.io/projected/c588671a-6c05-4991-9345-f9bc8724b0c7-kube-api-access-z599v\") on node \"crc\" DevicePath \"\"" Jan 28 12:40:37 crc kubenswrapper[4685]: I0128 12:40:37.262864 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m" event={"ID":"c588671a-6c05-4991-9345-f9bc8724b0c7","Type":"ContainerDied","Data":"6a62144500e8e8af81e3454b46f6d53e16dce655833a8bf15efafe2830d5866c"} Jan 28 12:40:37 crc kubenswrapper[4685]: I0128 12:40:37.263228 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6a62144500e8e8af81e3454b46f6d53e16dce655833a8bf15efafe2830d5866c" Jan 28 12:40:37 crc kubenswrapper[4685]: I0128 12:40:37.262934 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m" Jan 28 12:40:43 crc kubenswrapper[4685]: I0128 12:40:43.868302 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-5c6689bf87-nwxg8"] Jan 28 12:40:43 crc kubenswrapper[4685]: E0128 12:40:43.869616 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c588671a-6c05-4991-9345-f9bc8724b0c7" containerName="extract" Jan 28 12:40:43 crc kubenswrapper[4685]: I0128 12:40:43.869692 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="c588671a-6c05-4991-9345-f9bc8724b0c7" containerName="extract" Jan 28 12:40:43 crc kubenswrapper[4685]: E0128 12:40:43.869746 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c588671a-6c05-4991-9345-f9bc8724b0c7" containerName="util" Jan 28 12:40:43 crc kubenswrapper[4685]: I0128 12:40:43.869820 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="c588671a-6c05-4991-9345-f9bc8724b0c7" containerName="util" Jan 28 12:40:43 crc kubenswrapper[4685]: E0128 12:40:43.869877 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c588671a-6c05-4991-9345-f9bc8724b0c7" containerName="pull" Jan 28 12:40:43 crc kubenswrapper[4685]: I0128 12:40:43.869926 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="c588671a-6c05-4991-9345-f9bc8724b0c7" containerName="pull" Jan 28 12:40:43 crc kubenswrapper[4685]: I0128 12:40:43.870161 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="c588671a-6c05-4991-9345-f9bc8724b0c7" containerName="extract" Jan 28 12:40:43 crc kubenswrapper[4685]: I0128 12:40:43.871833 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-5c6689bf87-nwxg8" Jan 28 12:40:43 crc kubenswrapper[4685]: I0128 12:40:43.885781 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 28 12:40:43 crc kubenswrapper[4685]: I0128 12:40:43.886378 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-service-cert" Jan 28 12:40:43 crc kubenswrapper[4685]: I0128 12:40:43.886676 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-cn9js" Jan 28 12:40:43 crc kubenswrapper[4685]: I0128 12:40:43.894569 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-5c6689bf87-nwxg8"] Jan 28 12:40:43 crc kubenswrapper[4685]: I0128 12:40:43.960845 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ea53fa95-6178-4658-a817-773696aac856-webhook-cert\") pod \"mariadb-operator-controller-manager-5c6689bf87-nwxg8\" (UID: \"ea53fa95-6178-4658-a817-773696aac856\") " pod="openstack-operators/mariadb-operator-controller-manager-5c6689bf87-nwxg8" Jan 28 12:40:43 crc kubenswrapper[4685]: I0128 12:40:43.960911 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ea53fa95-6178-4658-a817-773696aac856-apiservice-cert\") pod \"mariadb-operator-controller-manager-5c6689bf87-nwxg8\" (UID: \"ea53fa95-6178-4658-a817-773696aac856\") " pod="openstack-operators/mariadb-operator-controller-manager-5c6689bf87-nwxg8" Jan 28 12:40:43 crc kubenswrapper[4685]: I0128 12:40:43.960950 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzqlq\" (UniqueName: \"kubernetes.io/projected/ea53fa95-6178-4658-a817-773696aac856-kube-api-access-hzqlq\") pod \"mariadb-operator-controller-manager-5c6689bf87-nwxg8\" (UID: \"ea53fa95-6178-4658-a817-773696aac856\") " pod="openstack-operators/mariadb-operator-controller-manager-5c6689bf87-nwxg8" Jan 28 12:40:44 crc kubenswrapper[4685]: I0128 12:40:44.062402 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzqlq\" (UniqueName: \"kubernetes.io/projected/ea53fa95-6178-4658-a817-773696aac856-kube-api-access-hzqlq\") pod \"mariadb-operator-controller-manager-5c6689bf87-nwxg8\" (UID: \"ea53fa95-6178-4658-a817-773696aac856\") " pod="openstack-operators/mariadb-operator-controller-manager-5c6689bf87-nwxg8" Jan 28 12:40:44 crc kubenswrapper[4685]: I0128 12:40:44.062499 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ea53fa95-6178-4658-a817-773696aac856-webhook-cert\") pod \"mariadb-operator-controller-manager-5c6689bf87-nwxg8\" (UID: \"ea53fa95-6178-4658-a817-773696aac856\") " pod="openstack-operators/mariadb-operator-controller-manager-5c6689bf87-nwxg8" Jan 28 12:40:44 crc kubenswrapper[4685]: I0128 12:40:44.062555 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ea53fa95-6178-4658-a817-773696aac856-apiservice-cert\") pod \"mariadb-operator-controller-manager-5c6689bf87-nwxg8\" (UID: \"ea53fa95-6178-4658-a817-773696aac856\") 
" pod="openstack-operators/mariadb-operator-controller-manager-5c6689bf87-nwxg8" Jan 28 12:40:44 crc kubenswrapper[4685]: I0128 12:40:44.068122 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ea53fa95-6178-4658-a817-773696aac856-apiservice-cert\") pod \"mariadb-operator-controller-manager-5c6689bf87-nwxg8\" (UID: \"ea53fa95-6178-4658-a817-773696aac856\") " pod="openstack-operators/mariadb-operator-controller-manager-5c6689bf87-nwxg8" Jan 28 12:40:44 crc kubenswrapper[4685]: I0128 12:40:44.074304 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ea53fa95-6178-4658-a817-773696aac856-webhook-cert\") pod \"mariadb-operator-controller-manager-5c6689bf87-nwxg8\" (UID: \"ea53fa95-6178-4658-a817-773696aac856\") " pod="openstack-operators/mariadb-operator-controller-manager-5c6689bf87-nwxg8" Jan 28 12:40:44 crc kubenswrapper[4685]: I0128 12:40:44.083047 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzqlq\" (UniqueName: \"kubernetes.io/projected/ea53fa95-6178-4658-a817-773696aac856-kube-api-access-hzqlq\") pod \"mariadb-operator-controller-manager-5c6689bf87-nwxg8\" (UID: \"ea53fa95-6178-4658-a817-773696aac856\") " pod="openstack-operators/mariadb-operator-controller-manager-5c6689bf87-nwxg8" Jan 28 12:40:44 crc kubenswrapper[4685]: I0128 12:40:44.197556 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-5c6689bf87-nwxg8" Jan 28 12:40:44 crc kubenswrapper[4685]: I0128 12:40:44.410984 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-5c6689bf87-nwxg8"] Jan 28 12:40:44 crc kubenswrapper[4685]: W0128 12:40:44.422084 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podea53fa95_6178_4658_a817_773696aac856.slice/crio-ebeb074029462d14153c9d43c1957b612f8e462b01a9a5343396bac3931596a2 WatchSource:0}: Error finding container ebeb074029462d14153c9d43c1957b612f8e462b01a9a5343396bac3931596a2: Status 404 returned error can't find the container with id ebeb074029462d14153c9d43c1957b612f8e462b01a9a5343396bac3931596a2 Jan 28 12:40:45 crc kubenswrapper[4685]: I0128 12:40:45.315472 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-5c6689bf87-nwxg8" event={"ID":"ea53fa95-6178-4658-a817-773696aac856","Type":"ContainerStarted","Data":"ebeb074029462d14153c9d43c1957b612f8e462b01a9a5343396bac3931596a2"} Jan 28 12:40:50 crc kubenswrapper[4685]: I0128 12:40:50.344682 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-5c6689bf87-nwxg8" event={"ID":"ea53fa95-6178-4658-a817-773696aac856","Type":"ContainerStarted","Data":"c0a6611a108507c78517f3b85ad068b98da25d12c5bff5094cd5289378b01b1c"} Jan 28 12:40:50 crc kubenswrapper[4685]: I0128 12:40:50.345442 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-5c6689bf87-nwxg8" Jan 28 12:40:55 crc kubenswrapper[4685]: I0128 12:40:55.417524 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-5c6689bf87-nwxg8" podStartSLOduration=7.160512383 podStartE2EDuration="12.417504945s" 
podCreationTimestamp="2026-01-28 12:40:43 +0000 UTC" firstStartedPulling="2026-01-28 12:40:44.424051797 +0000 UTC m=+1195.511465642" lastFinishedPulling="2026-01-28 12:40:49.681044379 +0000 UTC m=+1200.768458204" observedRunningTime="2026-01-28 12:40:50.373451138 +0000 UTC m=+1201.460864973" watchObservedRunningTime="2026-01-28 12:40:55.417504945 +0000 UTC m=+1206.504918780" Jan 28 12:40:55 crc kubenswrapper[4685]: I0128 12:40:55.423194 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-lk7tc"] Jan 28 12:40:55 crc kubenswrapper[4685]: I0128 12:40:55.424656 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lk7tc" Jan 28 12:40:55 crc kubenswrapper[4685]: I0128 12:40:55.438429 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lk7tc"] Jan 28 12:40:55 crc kubenswrapper[4685]: I0128 12:40:55.548693 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qptff\" (UniqueName: \"kubernetes.io/projected/f554c00a-e999-4348-8217-66e4e2f2edee-kube-api-access-qptff\") pod \"community-operators-lk7tc\" (UID: \"f554c00a-e999-4348-8217-66e4e2f2edee\") " pod="openshift-marketplace/community-operators-lk7tc" Jan 28 12:40:55 crc kubenswrapper[4685]: I0128 12:40:55.548771 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f554c00a-e999-4348-8217-66e4e2f2edee-utilities\") pod \"community-operators-lk7tc\" (UID: \"f554c00a-e999-4348-8217-66e4e2f2edee\") " pod="openshift-marketplace/community-operators-lk7tc" Jan 28 12:40:55 crc kubenswrapper[4685]: I0128 12:40:55.548905 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f554c00a-e999-4348-8217-66e4e2f2edee-catalog-content\") pod \"community-operators-lk7tc\" (UID: \"f554c00a-e999-4348-8217-66e4e2f2edee\") " pod="openshift-marketplace/community-operators-lk7tc" Jan 28 12:40:55 crc kubenswrapper[4685]: I0128 12:40:55.650214 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qptff\" (UniqueName: \"kubernetes.io/projected/f554c00a-e999-4348-8217-66e4e2f2edee-kube-api-access-qptff\") pod \"community-operators-lk7tc\" (UID: \"f554c00a-e999-4348-8217-66e4e2f2edee\") " pod="openshift-marketplace/community-operators-lk7tc" Jan 28 12:40:55 crc kubenswrapper[4685]: I0128 12:40:55.650541 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f554c00a-e999-4348-8217-66e4e2f2edee-utilities\") pod \"community-operators-lk7tc\" (UID: \"f554c00a-e999-4348-8217-66e4e2f2edee\") " pod="openshift-marketplace/community-operators-lk7tc" Jan 28 12:40:55 crc kubenswrapper[4685]: I0128 12:40:55.650693 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f554c00a-e999-4348-8217-66e4e2f2edee-catalog-content\") pod \"community-operators-lk7tc\" (UID: \"f554c00a-e999-4348-8217-66e4e2f2edee\") " pod="openshift-marketplace/community-operators-lk7tc" Jan 28 12:40:55 crc kubenswrapper[4685]: I0128 12:40:55.651454 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/f554c00a-e999-4348-8217-66e4e2f2edee-utilities\") pod \"community-operators-lk7tc\" (UID: \"f554c00a-e999-4348-8217-66e4e2f2edee\") " pod="openshift-marketplace/community-operators-lk7tc" Jan 28 12:40:55 crc kubenswrapper[4685]: I0128 12:40:55.651557 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f554c00a-e999-4348-8217-66e4e2f2edee-catalog-content\") pod \"community-operators-lk7tc\" (UID: \"f554c00a-e999-4348-8217-66e4e2f2edee\") " pod="openshift-marketplace/community-operators-lk7tc" Jan 28 12:40:55 crc kubenswrapper[4685]: I0128 12:40:55.675389 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qptff\" (UniqueName: \"kubernetes.io/projected/f554c00a-e999-4348-8217-66e4e2f2edee-kube-api-access-qptff\") pod \"community-operators-lk7tc\" (UID: \"f554c00a-e999-4348-8217-66e4e2f2edee\") " pod="openshift-marketplace/community-operators-lk7tc" Jan 28 12:40:55 crc kubenswrapper[4685]: I0128 12:40:55.741649 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lk7tc" Jan 28 12:40:56 crc kubenswrapper[4685]: I0128 12:40:56.252799 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lk7tc"] Jan 28 12:40:56 crc kubenswrapper[4685]: I0128 12:40:56.376552 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lk7tc" event={"ID":"f554c00a-e999-4348-8217-66e4e2f2edee","Type":"ContainerStarted","Data":"4c3232d6329502688d55204776b952029ca97e5006758ec928a56d59944abc1e"} Jan 28 12:40:57 crc kubenswrapper[4685]: I0128 12:40:57.069279 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:40:57 crc kubenswrapper[4685]: I0128 12:40:57.069391 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:40:57 crc kubenswrapper[4685]: I0128 12:40:57.069461 4685 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:40:57 crc kubenswrapper[4685]: I0128 12:40:57.070083 4685 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"62facba6395aa0c2d16f630a44d637954256b42216e7c75dfe642554c0dcdb37"} pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 12:40:57 crc kubenswrapper[4685]: I0128 12:40:57.070127 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" containerID="cri-o://62facba6395aa0c2d16f630a44d637954256b42216e7c75dfe642554c0dcdb37" gracePeriod=600 Jan 28 12:40:57 crc kubenswrapper[4685]: I0128 12:40:57.385494 4685 
generic.go:334] "Generic (PLEG): container finished" podID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerID="62facba6395aa0c2d16f630a44d637954256b42216e7c75dfe642554c0dcdb37" exitCode=0 Jan 28 12:40:57 crc kubenswrapper[4685]: I0128 12:40:57.385665 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerDied","Data":"62facba6395aa0c2d16f630a44d637954256b42216e7c75dfe642554c0dcdb37"} Jan 28 12:40:57 crc kubenswrapper[4685]: I0128 12:40:57.385957 4685 scope.go:117] "RemoveContainer" containerID="121e44d10a06506539fa644a7a14ed20205554003b9b8ca8281810f57af222bf" Jan 28 12:40:57 crc kubenswrapper[4685]: I0128 12:40:57.388519 4685 generic.go:334] "Generic (PLEG): container finished" podID="f554c00a-e999-4348-8217-66e4e2f2edee" containerID="813b3e0662cb496df9ff8cd381239fcef6a4d6bbe782b0a905aaa92ad0471ea1" exitCode=0 Jan 28 12:40:57 crc kubenswrapper[4685]: I0128 12:40:57.388578 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lk7tc" event={"ID":"f554c00a-e999-4348-8217-66e4e2f2edee","Type":"ContainerDied","Data":"813b3e0662cb496df9ff8cd381239fcef6a4d6bbe782b0a905aaa92ad0471ea1"} Jan 28 12:40:58 crc kubenswrapper[4685]: I0128 12:40:58.394825 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerStarted","Data":"fe6797eb1526f7a98dbf830c75b37ec963fdbf0115e31ebdafc20e877843581c"} Jan 28 12:40:59 crc kubenswrapper[4685]: I0128 12:40:59.405393 4685 generic.go:334] "Generic (PLEG): container finished" podID="f554c00a-e999-4348-8217-66e4e2f2edee" containerID="5649f7ae72326dc4cae5e4dcb1989efffd5cf448f8b727a7a6b59a09608cd9d6" exitCode=0 Jan 28 12:40:59 crc kubenswrapper[4685]: I0128 12:40:59.405477 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lk7tc" event={"ID":"f554c00a-e999-4348-8217-66e4e2f2edee","Type":"ContainerDied","Data":"5649f7ae72326dc4cae5e4dcb1989efffd5cf448f8b727a7a6b59a09608cd9d6"} Jan 28 12:41:00 crc kubenswrapper[4685]: I0128 12:41:00.413773 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lk7tc" event={"ID":"f554c00a-e999-4348-8217-66e4e2f2edee","Type":"ContainerStarted","Data":"e2766040913d563c55943deb9e8dbc89d8db1f0f1732181c4d1faf753958cbca"} Jan 28 12:41:00 crc kubenswrapper[4685]: I0128 12:41:00.433902 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-lk7tc" podStartSLOduration=3.046020656 podStartE2EDuration="5.433885475s" podCreationTimestamp="2026-01-28 12:40:55 +0000 UTC" firstStartedPulling="2026-01-28 12:40:57.391267465 +0000 UTC m=+1208.478681300" lastFinishedPulling="2026-01-28 12:40:59.779132274 +0000 UTC m=+1210.866546119" observedRunningTime="2026-01-28 12:41:00.430811258 +0000 UTC m=+1211.518225093" watchObservedRunningTime="2026-01-28 12:41:00.433885475 +0000 UTC m=+1211.521299310" Jan 28 12:41:04 crc kubenswrapper[4685]: I0128 12:41:04.202028 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-5c6689bf87-nwxg8" Jan 28 12:41:05 crc kubenswrapper[4685]: I0128 12:41:05.742000 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/community-operators-lk7tc" Jan 28 12:41:05 crc kubenswrapper[4685]: I0128 12:41:05.742342 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-lk7tc" Jan 28 12:41:05 crc kubenswrapper[4685]: I0128 12:41:05.810746 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-lk7tc" Jan 28 12:41:06 crc kubenswrapper[4685]: I0128 12:41:06.502748 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-lk7tc" Jan 28 12:41:06 crc kubenswrapper[4685]: I0128 12:41:06.860992 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-index-mgc22"] Jan 28 12:41:06 crc kubenswrapper[4685]: I0128 12:41:06.861723 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-index-mgc22" Jan 28 12:41:06 crc kubenswrapper[4685]: I0128 12:41:06.863978 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-index-dockercfg-8xbt2" Jan 28 12:41:06 crc kubenswrapper[4685]: I0128 12:41:06.869396 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-index-mgc22"] Jan 28 12:41:06 crc kubenswrapper[4685]: I0128 12:41:06.925205 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-464r7\" (UniqueName: \"kubernetes.io/projected/e546abc8-a12d-4d2d-863b-116e8100955c-kube-api-access-464r7\") pod \"infra-operator-index-mgc22\" (UID: \"e546abc8-a12d-4d2d-863b-116e8100955c\") " pod="openstack-operators/infra-operator-index-mgc22" Jan 28 12:41:07 crc kubenswrapper[4685]: I0128 12:41:07.026853 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-464r7\" (UniqueName: \"kubernetes.io/projected/e546abc8-a12d-4d2d-863b-116e8100955c-kube-api-access-464r7\") pod \"infra-operator-index-mgc22\" (UID: \"e546abc8-a12d-4d2d-863b-116e8100955c\") " pod="openstack-operators/infra-operator-index-mgc22" Jan 28 12:41:07 crc kubenswrapper[4685]: I0128 12:41:07.047125 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-464r7\" (UniqueName: \"kubernetes.io/projected/e546abc8-a12d-4d2d-863b-116e8100955c-kube-api-access-464r7\") pod \"infra-operator-index-mgc22\" (UID: \"e546abc8-a12d-4d2d-863b-116e8100955c\") " pod="openstack-operators/infra-operator-index-mgc22" Jan 28 12:41:07 crc kubenswrapper[4685]: I0128 12:41:07.176577 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-index-mgc22" Jan 28 12:41:07 crc kubenswrapper[4685]: I0128 12:41:07.589555 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-index-mgc22"] Jan 28 12:41:07 crc kubenswrapper[4685]: W0128 12:41:07.590879 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode546abc8_a12d_4d2d_863b_116e8100955c.slice/crio-8ca808b266394162f7218a965c3e944117c83616645b1485963cb0b0d2f14cd2 WatchSource:0}: Error finding container 8ca808b266394162f7218a965c3e944117c83616645b1485963cb0b0d2f14cd2: Status 404 returned error can't find the container with id 8ca808b266394162f7218a965c3e944117c83616645b1485963cb0b0d2f14cd2 Jan 28 12:41:08 crc kubenswrapper[4685]: I0128 12:41:08.458041 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-mgc22" event={"ID":"e546abc8-a12d-4d2d-863b-116e8100955c","Type":"ContainerStarted","Data":"8ca808b266394162f7218a965c3e944117c83616645b1485963cb0b0d2f14cd2"} Jan 28 12:41:09 crc kubenswrapper[4685]: I0128 12:41:09.469221 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-mgc22" event={"ID":"e546abc8-a12d-4d2d-863b-116e8100955c","Type":"ContainerStarted","Data":"5902d64bf8614bf1f0f8d0f8d76e63a62778dd5bc8ad62fe77d65d81569e5f0a"} Jan 28 12:41:09 crc kubenswrapper[4685]: I0128 12:41:09.490913 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-index-mgc22" podStartSLOduration=1.800695509 podStartE2EDuration="3.490892009s" podCreationTimestamp="2026-01-28 12:41:06 +0000 UTC" firstStartedPulling="2026-01-28 12:41:07.592415796 +0000 UTC m=+1218.679829631" lastFinishedPulling="2026-01-28 12:41:09.282612296 +0000 UTC m=+1220.370026131" observedRunningTime="2026-01-28 12:41:09.485853926 +0000 UTC m=+1220.573267761" watchObservedRunningTime="2026-01-28 12:41:09.490892009 +0000 UTC m=+1220.578305854" Jan 28 12:41:10 crc kubenswrapper[4685]: I0128 12:41:10.455803 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lk7tc"] Jan 28 12:41:10 crc kubenswrapper[4685]: I0128 12:41:10.456338 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-lk7tc" podUID="f554c00a-e999-4348-8217-66e4e2f2edee" containerName="registry-server" containerID="cri-o://e2766040913d563c55943deb9e8dbc89d8db1f0f1732181c4d1faf753958cbca" gracePeriod=2 Jan 28 12:41:11 crc kubenswrapper[4685]: I0128 12:41:11.860492 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/infra-operator-index-mgc22"] Jan 28 12:41:11 crc kubenswrapper[4685]: I0128 12:41:11.860991 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/infra-operator-index-mgc22" podUID="e546abc8-a12d-4d2d-863b-116e8100955c" containerName="registry-server" containerID="cri-o://5902d64bf8614bf1f0f8d0f8d76e63a62778dd5bc8ad62fe77d65d81569e5f0a" gracePeriod=2 Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.311112 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-index-mgc22" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.409038 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-464r7\" (UniqueName: \"kubernetes.io/projected/e546abc8-a12d-4d2d-863b-116e8100955c-kube-api-access-464r7\") pod \"e546abc8-a12d-4d2d-863b-116e8100955c\" (UID: \"e546abc8-a12d-4d2d-863b-116e8100955c\") " Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.414583 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e546abc8-a12d-4d2d-863b-116e8100955c-kube-api-access-464r7" (OuterVolumeSpecName: "kube-api-access-464r7") pod "e546abc8-a12d-4d2d-863b-116e8100955c" (UID: "e546abc8-a12d-4d2d-863b-116e8100955c"). InnerVolumeSpecName "kube-api-access-464r7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.453057 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lk7tc" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.497441 4685 generic.go:334] "Generic (PLEG): container finished" podID="e546abc8-a12d-4d2d-863b-116e8100955c" containerID="5902d64bf8614bf1f0f8d0f8d76e63a62778dd5bc8ad62fe77d65d81569e5f0a" exitCode=0 Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.497503 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-mgc22" event={"ID":"e546abc8-a12d-4d2d-863b-116e8100955c","Type":"ContainerDied","Data":"5902d64bf8614bf1f0f8d0f8d76e63a62778dd5bc8ad62fe77d65d81569e5f0a"} Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.497535 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-mgc22" event={"ID":"e546abc8-a12d-4d2d-863b-116e8100955c","Type":"ContainerDied","Data":"8ca808b266394162f7218a965c3e944117c83616645b1485963cb0b0d2f14cd2"} Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.497555 4685 scope.go:117] "RemoveContainer" containerID="5902d64bf8614bf1f0f8d0f8d76e63a62778dd5bc8ad62fe77d65d81569e5f0a" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.497659 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-index-mgc22" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.502249 4685 generic.go:334] "Generic (PLEG): container finished" podID="f554c00a-e999-4348-8217-66e4e2f2edee" containerID="e2766040913d563c55943deb9e8dbc89d8db1f0f1732181c4d1faf753958cbca" exitCode=0 Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.502282 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lk7tc" event={"ID":"f554c00a-e999-4348-8217-66e4e2f2edee","Type":"ContainerDied","Data":"e2766040913d563c55943deb9e8dbc89d8db1f0f1732181c4d1faf753958cbca"} Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.502304 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lk7tc" event={"ID":"f554c00a-e999-4348-8217-66e4e2f2edee","Type":"ContainerDied","Data":"4c3232d6329502688d55204776b952029ca97e5006758ec928a56d59944abc1e"} Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.502336 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-lk7tc" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.511465 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-464r7\" (UniqueName: \"kubernetes.io/projected/e546abc8-a12d-4d2d-863b-116e8100955c-kube-api-access-464r7\") on node \"crc\" DevicePath \"\"" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.519147 4685 scope.go:117] "RemoveContainer" containerID="5902d64bf8614bf1f0f8d0f8d76e63a62778dd5bc8ad62fe77d65d81569e5f0a" Jan 28 12:41:12 crc kubenswrapper[4685]: E0128 12:41:12.519810 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5902d64bf8614bf1f0f8d0f8d76e63a62778dd5bc8ad62fe77d65d81569e5f0a\": container with ID starting with 5902d64bf8614bf1f0f8d0f8d76e63a62778dd5bc8ad62fe77d65d81569e5f0a not found: ID does not exist" containerID="5902d64bf8614bf1f0f8d0f8d76e63a62778dd5bc8ad62fe77d65d81569e5f0a" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.519843 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5902d64bf8614bf1f0f8d0f8d76e63a62778dd5bc8ad62fe77d65d81569e5f0a"} err="failed to get container status \"5902d64bf8614bf1f0f8d0f8d76e63a62778dd5bc8ad62fe77d65d81569e5f0a\": rpc error: code = NotFound desc = could not find container \"5902d64bf8614bf1f0f8d0f8d76e63a62778dd5bc8ad62fe77d65d81569e5f0a\": container with ID starting with 5902d64bf8614bf1f0f8d0f8d76e63a62778dd5bc8ad62fe77d65d81569e5f0a not found: ID does not exist" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.519977 4685 scope.go:117] "RemoveContainer" containerID="e2766040913d563c55943deb9e8dbc89d8db1f0f1732181c4d1faf753958cbca" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.528391 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/infra-operator-index-mgc22"] Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.532015 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/infra-operator-index-mgc22"] Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.535623 4685 scope.go:117] "RemoveContainer" containerID="5649f7ae72326dc4cae5e4dcb1989efffd5cf448f8b727a7a6b59a09608cd9d6" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.549702 4685 scope.go:117] "RemoveContainer" containerID="813b3e0662cb496df9ff8cd381239fcef6a4d6bbe782b0a905aaa92ad0471ea1" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.553583 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e546abc8-a12d-4d2d-863b-116e8100955c" path="/var/lib/kubelet/pods/e546abc8-a12d-4d2d-863b-116e8100955c/volumes" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.572946 4685 scope.go:117] "RemoveContainer" containerID="e2766040913d563c55943deb9e8dbc89d8db1f0f1732181c4d1faf753958cbca" Jan 28 12:41:12 crc kubenswrapper[4685]: E0128 12:41:12.585020 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2766040913d563c55943deb9e8dbc89d8db1f0f1732181c4d1faf753958cbca\": container with ID starting with e2766040913d563c55943deb9e8dbc89d8db1f0f1732181c4d1faf753958cbca not found: ID does not exist" containerID="e2766040913d563c55943deb9e8dbc89d8db1f0f1732181c4d1faf753958cbca" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.585063 4685 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"e2766040913d563c55943deb9e8dbc89d8db1f0f1732181c4d1faf753958cbca"} err="failed to get container status \"e2766040913d563c55943deb9e8dbc89d8db1f0f1732181c4d1faf753958cbca\": rpc error: code = NotFound desc = could not find container \"e2766040913d563c55943deb9e8dbc89d8db1f0f1732181c4d1faf753958cbca\": container with ID starting with e2766040913d563c55943deb9e8dbc89d8db1f0f1732181c4d1faf753958cbca not found: ID does not exist" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.585099 4685 scope.go:117] "RemoveContainer" containerID="5649f7ae72326dc4cae5e4dcb1989efffd5cf448f8b727a7a6b59a09608cd9d6" Jan 28 12:41:12 crc kubenswrapper[4685]: E0128 12:41:12.586509 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5649f7ae72326dc4cae5e4dcb1989efffd5cf448f8b727a7a6b59a09608cd9d6\": container with ID starting with 5649f7ae72326dc4cae5e4dcb1989efffd5cf448f8b727a7a6b59a09608cd9d6 not found: ID does not exist" containerID="5649f7ae72326dc4cae5e4dcb1989efffd5cf448f8b727a7a6b59a09608cd9d6" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.586566 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5649f7ae72326dc4cae5e4dcb1989efffd5cf448f8b727a7a6b59a09608cd9d6"} err="failed to get container status \"5649f7ae72326dc4cae5e4dcb1989efffd5cf448f8b727a7a6b59a09608cd9d6\": rpc error: code = NotFound desc = could not find container \"5649f7ae72326dc4cae5e4dcb1989efffd5cf448f8b727a7a6b59a09608cd9d6\": container with ID starting with 5649f7ae72326dc4cae5e4dcb1989efffd5cf448f8b727a7a6b59a09608cd9d6 not found: ID does not exist" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.586600 4685 scope.go:117] "RemoveContainer" containerID="813b3e0662cb496df9ff8cd381239fcef6a4d6bbe782b0a905aaa92ad0471ea1" Jan 28 12:41:12 crc kubenswrapper[4685]: E0128 12:41:12.586909 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"813b3e0662cb496df9ff8cd381239fcef6a4d6bbe782b0a905aaa92ad0471ea1\": container with ID starting with 813b3e0662cb496df9ff8cd381239fcef6a4d6bbe782b0a905aaa92ad0471ea1 not found: ID does not exist" containerID="813b3e0662cb496df9ff8cd381239fcef6a4d6bbe782b0a905aaa92ad0471ea1" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.586941 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"813b3e0662cb496df9ff8cd381239fcef6a4d6bbe782b0a905aaa92ad0471ea1"} err="failed to get container status \"813b3e0662cb496df9ff8cd381239fcef6a4d6bbe782b0a905aaa92ad0471ea1\": rpc error: code = NotFound desc = could not find container \"813b3e0662cb496df9ff8cd381239fcef6a4d6bbe782b0a905aaa92ad0471ea1\": container with ID starting with 813b3e0662cb496df9ff8cd381239fcef6a4d6bbe782b0a905aaa92ad0471ea1 not found: ID does not exist" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.612730 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qptff\" (UniqueName: \"kubernetes.io/projected/f554c00a-e999-4348-8217-66e4e2f2edee-kube-api-access-qptff\") pod \"f554c00a-e999-4348-8217-66e4e2f2edee\" (UID: \"f554c00a-e999-4348-8217-66e4e2f2edee\") " Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.612878 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/f554c00a-e999-4348-8217-66e4e2f2edee-catalog-content\") pod \"f554c00a-e999-4348-8217-66e4e2f2edee\" (UID: \"f554c00a-e999-4348-8217-66e4e2f2edee\") " Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.612936 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f554c00a-e999-4348-8217-66e4e2f2edee-utilities\") pod \"f554c00a-e999-4348-8217-66e4e2f2edee\" (UID: \"f554c00a-e999-4348-8217-66e4e2f2edee\") " Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.613714 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f554c00a-e999-4348-8217-66e4e2f2edee-utilities" (OuterVolumeSpecName: "utilities") pod "f554c00a-e999-4348-8217-66e4e2f2edee" (UID: "f554c00a-e999-4348-8217-66e4e2f2edee"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.619331 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f554c00a-e999-4348-8217-66e4e2f2edee-kube-api-access-qptff" (OuterVolumeSpecName: "kube-api-access-qptff") pod "f554c00a-e999-4348-8217-66e4e2f2edee" (UID: "f554c00a-e999-4348-8217-66e4e2f2edee"). InnerVolumeSpecName "kube-api-access-qptff". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.663521 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-index-jbffg"] Jan 28 12:41:12 crc kubenswrapper[4685]: E0128 12:41:12.663764 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f554c00a-e999-4348-8217-66e4e2f2edee" containerName="extract-utilities" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.663778 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f554c00a-e999-4348-8217-66e4e2f2edee" containerName="extract-utilities" Jan 28 12:41:12 crc kubenswrapper[4685]: E0128 12:41:12.663792 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e546abc8-a12d-4d2d-863b-116e8100955c" containerName="registry-server" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.663799 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="e546abc8-a12d-4d2d-863b-116e8100955c" containerName="registry-server" Jan 28 12:41:12 crc kubenswrapper[4685]: E0128 12:41:12.663810 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f554c00a-e999-4348-8217-66e4e2f2edee" containerName="extract-content" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.663816 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f554c00a-e999-4348-8217-66e4e2f2edee" containerName="extract-content" Jan 28 12:41:12 crc kubenswrapper[4685]: E0128 12:41:12.663832 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f554c00a-e999-4348-8217-66e4e2f2edee" containerName="registry-server" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.663837 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f554c00a-e999-4348-8217-66e4e2f2edee" containerName="registry-server" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.663924 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="e546abc8-a12d-4d2d-863b-116e8100955c" containerName="registry-server" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.663935 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="f554c00a-e999-4348-8217-66e4e2f2edee" 
containerName="registry-server" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.664299 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-index-jbffg" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.667707 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-index-dockercfg-8xbt2" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.677288 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f554c00a-e999-4348-8217-66e4e2f2edee-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f554c00a-e999-4348-8217-66e4e2f2edee" (UID: "f554c00a-e999-4348-8217-66e4e2f2edee"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.679805 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-index-jbffg"] Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.714033 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f554c00a-e999-4348-8217-66e4e2f2edee-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.714066 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qptff\" (UniqueName: \"kubernetes.io/projected/f554c00a-e999-4348-8217-66e4e2f2edee-kube-api-access-qptff\") on node \"crc\" DevicePath \"\"" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.714078 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f554c00a-e999-4348-8217-66e4e2f2edee-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.816122 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fh5c4\" (UniqueName: \"kubernetes.io/projected/95a2d809-2dbf-456c-accd-390fff7ad267-kube-api-access-fh5c4\") pod \"infra-operator-index-jbffg\" (UID: \"95a2d809-2dbf-456c-accd-390fff7ad267\") " pod="openstack-operators/infra-operator-index-jbffg" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.835992 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lk7tc"] Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.840837 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-lk7tc"] Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.918627 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fh5c4\" (UniqueName: \"kubernetes.io/projected/95a2d809-2dbf-456c-accd-390fff7ad267-kube-api-access-fh5c4\") pod \"infra-operator-index-jbffg\" (UID: \"95a2d809-2dbf-456c-accd-390fff7ad267\") " pod="openstack-operators/infra-operator-index-jbffg" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.935570 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fh5c4\" (UniqueName: \"kubernetes.io/projected/95a2d809-2dbf-456c-accd-390fff7ad267-kube-api-access-fh5c4\") pod \"infra-operator-index-jbffg\" (UID: \"95a2d809-2dbf-456c-accd-390fff7ad267\") " pod="openstack-operators/infra-operator-index-jbffg" Jan 28 12:41:12 crc kubenswrapper[4685]: I0128 12:41:12.981565 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-index-jbffg" Jan 28 12:41:13 crc kubenswrapper[4685]: I0128 12:41:13.374686 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-index-jbffg"] Jan 28 12:41:13 crc kubenswrapper[4685]: I0128 12:41:13.509534 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-jbffg" event={"ID":"95a2d809-2dbf-456c-accd-390fff7ad267","Type":"ContainerStarted","Data":"408dc489e0ae29e66dc7c16d7a9f09b8225965a920673e6b31f9ca1ea7ab8a96"} Jan 28 12:41:14 crc kubenswrapper[4685]: I0128 12:41:14.554282 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f554c00a-e999-4348-8217-66e4e2f2edee" path="/var/lib/kubelet/pods/f554c00a-e999-4348-8217-66e4e2f2edee/volumes" Jan 28 12:41:15 crc kubenswrapper[4685]: I0128 12:41:15.524963 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-jbffg" event={"ID":"95a2d809-2dbf-456c-accd-390fff7ad267","Type":"ContainerStarted","Data":"2417b6a0563fa908c4ef4e76cd4b46e2da5017137d136f07328c9292c304df11"} Jan 28 12:41:15 crc kubenswrapper[4685]: I0128 12:41:15.536354 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-index-jbffg" podStartSLOduration=1.701426613 podStartE2EDuration="3.536294931s" podCreationTimestamp="2026-01-28 12:41:12 +0000 UTC" firstStartedPulling="2026-01-28 12:41:13.379476327 +0000 UTC m=+1224.466890162" lastFinishedPulling="2026-01-28 12:41:15.214344635 +0000 UTC m=+1226.301758480" observedRunningTime="2026-01-28 12:41:15.535701635 +0000 UTC m=+1226.623115500" watchObservedRunningTime="2026-01-28 12:41:15.536294931 +0000 UTC m=+1226.623708776" Jan 28 12:41:22 crc kubenswrapper[4685]: I0128 12:41:22.982214 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/infra-operator-index-jbffg" Jan 28 12:41:22 crc kubenswrapper[4685]: I0128 12:41:22.982520 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-index-jbffg" Jan 28 12:41:23 crc kubenswrapper[4685]: I0128 12:41:23.023512 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/infra-operator-index-jbffg" Jan 28 12:41:23 crc kubenswrapper[4685]: I0128 12:41:23.600485 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-index-jbffg" Jan 28 12:41:38 crc kubenswrapper[4685]: I0128 12:41:38.514365 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd"] Jan 28 12:41:38 crc kubenswrapper[4685]: I0128 12:41:38.517501 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd" Jan 28 12:41:38 crc kubenswrapper[4685]: I0128 12:41:38.519491 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-dvgnb" Jan 28 12:41:38 crc kubenswrapper[4685]: I0128 12:41:38.528388 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd"] Jan 28 12:41:38 crc kubenswrapper[4685]: I0128 12:41:38.657331 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqdsk\" (UniqueName: \"kubernetes.io/projected/539a4ff7-66af-412d-a54a-bfe010e856b4-kube-api-access-nqdsk\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd\" (UID: \"539a4ff7-66af-412d-a54a-bfe010e856b4\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd" Jan 28 12:41:38 crc kubenswrapper[4685]: I0128 12:41:38.657424 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/539a4ff7-66af-412d-a54a-bfe010e856b4-bundle\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd\" (UID: \"539a4ff7-66af-412d-a54a-bfe010e856b4\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd" Jan 28 12:41:38 crc kubenswrapper[4685]: I0128 12:41:38.657462 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/539a4ff7-66af-412d-a54a-bfe010e856b4-util\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd\" (UID: \"539a4ff7-66af-412d-a54a-bfe010e856b4\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd" Jan 28 12:41:38 crc kubenswrapper[4685]: I0128 12:41:38.759607 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqdsk\" (UniqueName: \"kubernetes.io/projected/539a4ff7-66af-412d-a54a-bfe010e856b4-kube-api-access-nqdsk\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd\" (UID: \"539a4ff7-66af-412d-a54a-bfe010e856b4\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd" Jan 28 12:41:38 crc kubenswrapper[4685]: I0128 12:41:38.759695 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/539a4ff7-66af-412d-a54a-bfe010e856b4-bundle\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd\" (UID: \"539a4ff7-66af-412d-a54a-bfe010e856b4\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd" Jan 28 12:41:38 crc kubenswrapper[4685]: I0128 12:41:38.759725 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/539a4ff7-66af-412d-a54a-bfe010e856b4-util\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd\" (UID: \"539a4ff7-66af-412d-a54a-bfe010e856b4\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd" Jan 28 12:41:38 crc kubenswrapper[4685]: I0128 12:41:38.760136 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/539a4ff7-66af-412d-a54a-bfe010e856b4-bundle\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd\" (UID: \"539a4ff7-66af-412d-a54a-bfe010e856b4\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd" Jan 28 12:41:38 crc kubenswrapper[4685]: I0128 12:41:38.760307 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/539a4ff7-66af-412d-a54a-bfe010e856b4-util\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd\" (UID: \"539a4ff7-66af-412d-a54a-bfe010e856b4\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd" Jan 28 12:41:38 crc kubenswrapper[4685]: I0128 12:41:38.780987 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqdsk\" (UniqueName: \"kubernetes.io/projected/539a4ff7-66af-412d-a54a-bfe010e856b4-kube-api-access-nqdsk\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd\" (UID: \"539a4ff7-66af-412d-a54a-bfe010e856b4\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd" Jan 28 12:41:38 crc kubenswrapper[4685]: I0128 12:41:38.839513 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd" Jan 28 12:41:39 crc kubenswrapper[4685]: I0128 12:41:39.075013 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd"] Jan 28 12:41:39 crc kubenswrapper[4685]: W0128 12:41:39.079921 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod539a4ff7_66af_412d_a54a_bfe010e856b4.slice/crio-4d22b34b3368eff2dc2907e91075adc8a98352834c9a48399244a0cc9ad29c6e WatchSource:0}: Error finding container 4d22b34b3368eff2dc2907e91075adc8a98352834c9a48399244a0cc9ad29c6e: Status 404 returned error can't find the container with id 4d22b34b3368eff2dc2907e91075adc8a98352834c9a48399244a0cc9ad29c6e Jan 28 12:41:39 crc kubenswrapper[4685]: I0128 12:41:39.673853 4685 generic.go:334] "Generic (PLEG): container finished" podID="539a4ff7-66af-412d-a54a-bfe010e856b4" containerID="3b3bb20f2bdb6b9fb0c4827b256502ae17e6c908c3a41735858e840cfeb83875" exitCode=0 Jan 28 12:41:39 crc kubenswrapper[4685]: I0128 12:41:39.674134 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd" event={"ID":"539a4ff7-66af-412d-a54a-bfe010e856b4","Type":"ContainerDied","Data":"3b3bb20f2bdb6b9fb0c4827b256502ae17e6c908c3a41735858e840cfeb83875"} Jan 28 12:41:39 crc kubenswrapper[4685]: I0128 12:41:39.674191 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd" event={"ID":"539a4ff7-66af-412d-a54a-bfe010e856b4","Type":"ContainerStarted","Data":"4d22b34b3368eff2dc2907e91075adc8a98352834c9a48399244a0cc9ad29c6e"} Jan 28 12:41:41 crc kubenswrapper[4685]: I0128 12:41:41.686409 4685 generic.go:334] "Generic (PLEG): container finished" podID="539a4ff7-66af-412d-a54a-bfe010e856b4" containerID="39571e3920dbe42ce00b15291bb8f8f88116cfb3a8d2f4c45db7b2a4472335aa" exitCode=0 Jan 28 12:41:41 crc kubenswrapper[4685]: I0128 12:41:41.686461 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd" event={"ID":"539a4ff7-66af-412d-a54a-bfe010e856b4","Type":"ContainerDied","Data":"39571e3920dbe42ce00b15291bb8f8f88116cfb3a8d2f4c45db7b2a4472335aa"} Jan 28 12:41:42 crc kubenswrapper[4685]: I0128 12:41:42.696823 4685 generic.go:334] "Generic (PLEG): container finished" podID="539a4ff7-66af-412d-a54a-bfe010e856b4" containerID="05e5b16ebb4c9f10d8b74544855ba713d4c3e11746505db3b197b0ad79fc3be5" exitCode=0 Jan 28 12:41:42 crc kubenswrapper[4685]: I0128 12:41:42.696874 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd" event={"ID":"539a4ff7-66af-412d-a54a-bfe010e856b4","Type":"ContainerDied","Data":"05e5b16ebb4c9f10d8b74544855ba713d4c3e11746505db3b197b0ad79fc3be5"} Jan 28 12:41:44 crc kubenswrapper[4685]: I0128 12:41:44.025897 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd" Jan 28 12:41:44 crc kubenswrapper[4685]: I0128 12:41:44.172744 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/539a4ff7-66af-412d-a54a-bfe010e856b4-bundle\") pod \"539a4ff7-66af-412d-a54a-bfe010e856b4\" (UID: \"539a4ff7-66af-412d-a54a-bfe010e856b4\") " Jan 28 12:41:44 crc kubenswrapper[4685]: I0128 12:41:44.173136 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/539a4ff7-66af-412d-a54a-bfe010e856b4-util\") pod \"539a4ff7-66af-412d-a54a-bfe010e856b4\" (UID: \"539a4ff7-66af-412d-a54a-bfe010e856b4\") " Jan 28 12:41:44 crc kubenswrapper[4685]: I0128 12:41:44.173313 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqdsk\" (UniqueName: \"kubernetes.io/projected/539a4ff7-66af-412d-a54a-bfe010e856b4-kube-api-access-nqdsk\") pod \"539a4ff7-66af-412d-a54a-bfe010e856b4\" (UID: \"539a4ff7-66af-412d-a54a-bfe010e856b4\") " Jan 28 12:41:44 crc kubenswrapper[4685]: I0128 12:41:44.176728 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/539a4ff7-66af-412d-a54a-bfe010e856b4-bundle" (OuterVolumeSpecName: "bundle") pod "539a4ff7-66af-412d-a54a-bfe010e856b4" (UID: "539a4ff7-66af-412d-a54a-bfe010e856b4"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:41:44 crc kubenswrapper[4685]: I0128 12:41:44.181986 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/539a4ff7-66af-412d-a54a-bfe010e856b4-kube-api-access-nqdsk" (OuterVolumeSpecName: "kube-api-access-nqdsk") pod "539a4ff7-66af-412d-a54a-bfe010e856b4" (UID: "539a4ff7-66af-412d-a54a-bfe010e856b4"). InnerVolumeSpecName "kube-api-access-nqdsk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:41:44 crc kubenswrapper[4685]: I0128 12:41:44.188994 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/539a4ff7-66af-412d-a54a-bfe010e856b4-util" (OuterVolumeSpecName: "util") pod "539a4ff7-66af-412d-a54a-bfe010e856b4" (UID: "539a4ff7-66af-412d-a54a-bfe010e856b4"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:41:44 crc kubenswrapper[4685]: I0128 12:41:44.274720 4685 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/539a4ff7-66af-412d-a54a-bfe010e856b4-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:41:44 crc kubenswrapper[4685]: I0128 12:41:44.274768 4685 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/539a4ff7-66af-412d-a54a-bfe010e856b4-util\") on node \"crc\" DevicePath \"\"" Jan 28 12:41:44 crc kubenswrapper[4685]: I0128 12:41:44.274780 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqdsk\" (UniqueName: \"kubernetes.io/projected/539a4ff7-66af-412d-a54a-bfe010e856b4-kube-api-access-nqdsk\") on node \"crc\" DevicePath \"\"" Jan 28 12:41:44 crc kubenswrapper[4685]: I0128 12:41:44.711843 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd" event={"ID":"539a4ff7-66af-412d-a54a-bfe010e856b4","Type":"ContainerDied","Data":"4d22b34b3368eff2dc2907e91075adc8a98352834c9a48399244a0cc9ad29c6e"} Jan 28 12:41:44 crc kubenswrapper[4685]: I0128 12:41:44.711909 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd" Jan 28 12:41:44 crc kubenswrapper[4685]: I0128 12:41:44.711921 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4d22b34b3368eff2dc2907e91075adc8a98352834c9a48399244a0cc9ad29c6e" Jan 28 12:41:53 crc kubenswrapper[4685]: I0128 12:41:53.620531 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-645c8ff456-fl4nf"] Jan 28 12:41:53 crc kubenswrapper[4685]: E0128 12:41:53.621891 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="539a4ff7-66af-412d-a54a-bfe010e856b4" containerName="pull" Jan 28 12:41:53 crc kubenswrapper[4685]: I0128 12:41:53.621914 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="539a4ff7-66af-412d-a54a-bfe010e856b4" containerName="pull" Jan 28 12:41:53 crc kubenswrapper[4685]: E0128 12:41:53.621936 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="539a4ff7-66af-412d-a54a-bfe010e856b4" containerName="util" Jan 28 12:41:53 crc kubenswrapper[4685]: I0128 12:41:53.621944 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="539a4ff7-66af-412d-a54a-bfe010e856b4" containerName="util" Jan 28 12:41:53 crc kubenswrapper[4685]: E0128 12:41:53.621959 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="539a4ff7-66af-412d-a54a-bfe010e856b4" containerName="extract" Jan 28 12:41:53 crc kubenswrapper[4685]: I0128 12:41:53.621968 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="539a4ff7-66af-412d-a54a-bfe010e856b4" containerName="extract" Jan 28 12:41:53 crc kubenswrapper[4685]: I0128 12:41:53.622103 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="539a4ff7-66af-412d-a54a-bfe010e856b4" containerName="extract" Jan 28 12:41:53 crc kubenswrapper[4685]: I0128 12:41:53.622800 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-645c8ff456-fl4nf" Jan 28 12:41:53 crc kubenswrapper[4685]: I0128 12:41:53.624590 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-qqlph" Jan 28 12:41:53 crc kubenswrapper[4685]: I0128 12:41:53.626331 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-service-cert" Jan 28 12:41:53 crc kubenswrapper[4685]: I0128 12:41:53.638225 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-645c8ff456-fl4nf"] Jan 28 12:41:53 crc kubenswrapper[4685]: I0128 12:41:53.695592 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d9afeea9-b228-4b11-8068-079d2093771c-apiservice-cert\") pod \"infra-operator-controller-manager-645c8ff456-fl4nf\" (UID: \"d9afeea9-b228-4b11-8068-079d2093771c\") " pod="openstack-operators/infra-operator-controller-manager-645c8ff456-fl4nf" Jan 28 12:41:53 crc kubenswrapper[4685]: I0128 12:41:53.695667 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lzzq\" (UniqueName: \"kubernetes.io/projected/d9afeea9-b228-4b11-8068-079d2093771c-kube-api-access-9lzzq\") pod \"infra-operator-controller-manager-645c8ff456-fl4nf\" (UID: \"d9afeea9-b228-4b11-8068-079d2093771c\") " pod="openstack-operators/infra-operator-controller-manager-645c8ff456-fl4nf" Jan 28 12:41:53 crc kubenswrapper[4685]: I0128 12:41:53.695703 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d9afeea9-b228-4b11-8068-079d2093771c-webhook-cert\") pod \"infra-operator-controller-manager-645c8ff456-fl4nf\" (UID: \"d9afeea9-b228-4b11-8068-079d2093771c\") " pod="openstack-operators/infra-operator-controller-manager-645c8ff456-fl4nf" Jan 28 12:41:53 crc kubenswrapper[4685]: I0128 12:41:53.796926 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d9afeea9-b228-4b11-8068-079d2093771c-apiservice-cert\") pod \"infra-operator-controller-manager-645c8ff456-fl4nf\" (UID: \"d9afeea9-b228-4b11-8068-079d2093771c\") " pod="openstack-operators/infra-operator-controller-manager-645c8ff456-fl4nf" Jan 28 12:41:53 crc kubenswrapper[4685]: I0128 12:41:53.796987 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lzzq\" (UniqueName: \"kubernetes.io/projected/d9afeea9-b228-4b11-8068-079d2093771c-kube-api-access-9lzzq\") pod \"infra-operator-controller-manager-645c8ff456-fl4nf\" (UID: \"d9afeea9-b228-4b11-8068-079d2093771c\") " pod="openstack-operators/infra-operator-controller-manager-645c8ff456-fl4nf" Jan 28 12:41:53 crc kubenswrapper[4685]: I0128 12:41:53.797017 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d9afeea9-b228-4b11-8068-079d2093771c-webhook-cert\") pod \"infra-operator-controller-manager-645c8ff456-fl4nf\" (UID: \"d9afeea9-b228-4b11-8068-079d2093771c\") " pod="openstack-operators/infra-operator-controller-manager-645c8ff456-fl4nf" Jan 28 12:41:53 crc kubenswrapper[4685]: I0128 12:41:53.803136 4685 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d9afeea9-b228-4b11-8068-079d2093771c-webhook-cert\") pod \"infra-operator-controller-manager-645c8ff456-fl4nf\" (UID: \"d9afeea9-b228-4b11-8068-079d2093771c\") " pod="openstack-operators/infra-operator-controller-manager-645c8ff456-fl4nf" Jan 28 12:41:53 crc kubenswrapper[4685]: I0128 12:41:53.803208 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d9afeea9-b228-4b11-8068-079d2093771c-apiservice-cert\") pod \"infra-operator-controller-manager-645c8ff456-fl4nf\" (UID: \"d9afeea9-b228-4b11-8068-079d2093771c\") " pod="openstack-operators/infra-operator-controller-manager-645c8ff456-fl4nf" Jan 28 12:41:53 crc kubenswrapper[4685]: I0128 12:41:53.814851 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lzzq\" (UniqueName: \"kubernetes.io/projected/d9afeea9-b228-4b11-8068-079d2093771c-kube-api-access-9lzzq\") pod \"infra-operator-controller-manager-645c8ff456-fl4nf\" (UID: \"d9afeea9-b228-4b11-8068-079d2093771c\") " pod="openstack-operators/infra-operator-controller-manager-645c8ff456-fl4nf" Jan 28 12:41:53 crc kubenswrapper[4685]: I0128 12:41:53.949970 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-645c8ff456-fl4nf" Jan 28 12:41:54 crc kubenswrapper[4685]: I0128 12:41:54.232299 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-645c8ff456-fl4nf"] Jan 28 12:41:54 crc kubenswrapper[4685]: I0128 12:41:54.765588 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-645c8ff456-fl4nf" event={"ID":"d9afeea9-b228-4b11-8068-079d2093771c","Type":"ContainerStarted","Data":"402a130cbc3ce6ea446f6498e4ffc0a2d89c5148fb5e870606399e5875985359"} Jan 28 12:41:58 crc kubenswrapper[4685]: I0128 12:41:58.807764 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-645c8ff456-fl4nf" event={"ID":"d9afeea9-b228-4b11-8068-079d2093771c","Type":"ContainerStarted","Data":"f09da93c8010bc73ce887805bb312077d3257fed8f03477c7d29ce787e712337"} Jan 28 12:41:58 crc kubenswrapper[4685]: I0128 12:41:58.808081 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-645c8ff456-fl4nf" Jan 28 12:41:58 crc kubenswrapper[4685]: I0128 12:41:58.828539 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-645c8ff456-fl4nf" podStartSLOduration=1.573554312 podStartE2EDuration="5.82852128s" podCreationTimestamp="2026-01-28 12:41:53 +0000 UTC" firstStartedPulling="2026-01-28 12:41:54.263975194 +0000 UTC m=+1265.351389029" lastFinishedPulling="2026-01-28 12:41:58.518942162 +0000 UTC m=+1269.606355997" observedRunningTime="2026-01-28 12:41:58.824869887 +0000 UTC m=+1269.912283722" watchObservedRunningTime="2026-01-28 12:41:58.82852128 +0000 UTC m=+1269.915935115" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.782881 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/openstack-galera-0"] Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.784261 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/openstack-galera-0" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.786817 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"openshift-service-ca.crt" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.786955 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"galera-openstack-dockercfg-9zqrl" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.787346 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"kube-root-ca.crt" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.788003 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"openstack-scripts" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.791595 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"openstack-config-data" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.800255 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/openstack-galera-0"] Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.804607 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/openstack-galera-1"] Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.805749 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/openstack-galera-1" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.808108 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/openstack-galera-2"] Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.808801 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/openstack-galera-2" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.818089 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/openstack-galera-1"] Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.826289 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/openstack-galera-2"] Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.919464 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a53ef3ad-6fae-4b22-a498-cb541237093d-config-data-default\") pod \"openstack-galera-1\" (UID: \"a53ef3ad-6fae-4b22-a498-cb541237093d\") " pod="glance-kuttl-tests/openstack-galera-1" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.919513 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a53ef3ad-6fae-4b22-a498-cb541237093d-config-data-generated\") pod \"openstack-galera-1\" (UID: \"a53ef3ad-6fae-4b22-a498-cb541237093d\") " pod="glance-kuttl-tests/openstack-galera-1" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.919543 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wg6m5\" (UniqueName: \"kubernetes.io/projected/4f3b56e1-537e-46b6-b9fa-78b735d4721c-kube-api-access-wg6m5\") pod \"openstack-galera-2\" (UID: \"4f3b56e1-537e-46b6-b9fa-78b735d4721c\") " pod="glance-kuttl-tests/openstack-galera-2" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.919571 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7b2z\" 
(UniqueName: \"kubernetes.io/projected/ae11d7ca-ae3f-41f9-b510-18fde2492aa2-kube-api-access-k7b2z\") pod \"openstack-galera-0\" (UID: \"ae11d7ca-ae3f-41f9-b510-18fde2492aa2\") " pod="glance-kuttl-tests/openstack-galera-0" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.919590 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4f3b56e1-537e-46b6-b9fa-78b735d4721c-kolla-config\") pod \"openstack-galera-2\" (UID: \"4f3b56e1-537e-46b6-b9fa-78b735d4721c\") " pod="glance-kuttl-tests/openstack-galera-2" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.919625 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-2\" (UID: \"4f3b56e1-537e-46b6-b9fa-78b735d4721c\") " pod="glance-kuttl-tests/openstack-galera-2" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.919649 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f3b56e1-537e-46b6-b9fa-78b735d4721c-operator-scripts\") pod \"openstack-galera-2\" (UID: \"4f3b56e1-537e-46b6-b9fa-78b735d4721c\") " pod="glance-kuttl-tests/openstack-galera-2" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.919669 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-1\" (UID: \"a53ef3ad-6fae-4b22-a498-cb541237093d\") " pod="glance-kuttl-tests/openstack-galera-1" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.919691 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae11d7ca-ae3f-41f9-b510-18fde2492aa2-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ae11d7ca-ae3f-41f9-b510-18fde2492aa2\") " pod="glance-kuttl-tests/openstack-galera-0" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.919713 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a53ef3ad-6fae-4b22-a498-cb541237093d-kolla-config\") pod \"openstack-galera-1\" (UID: \"a53ef3ad-6fae-4b22-a498-cb541237093d\") " pod="glance-kuttl-tests/openstack-galera-1" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.919736 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ae11d7ca-ae3f-41f9-b510-18fde2492aa2-kolla-config\") pod \"openstack-galera-0\" (UID: \"ae11d7ca-ae3f-41f9-b510-18fde2492aa2\") " pod="glance-kuttl-tests/openstack-galera-0" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.919762 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ae11d7ca-ae3f-41f9-b510-18fde2492aa2-config-data-generated\") pod \"openstack-galera-0\" (UID: \"ae11d7ca-ae3f-41f9-b510-18fde2492aa2\") " pod="glance-kuttl-tests/openstack-galera-0" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.919831 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmd6m\" 
(UniqueName: \"kubernetes.io/projected/a53ef3ad-6fae-4b22-a498-cb541237093d-kube-api-access-jmd6m\") pod \"openstack-galera-1\" (UID: \"a53ef3ad-6fae-4b22-a498-cb541237093d\") " pod="glance-kuttl-tests/openstack-galera-1" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.919904 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage16-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage16-crc\") pod \"openstack-galera-0\" (UID: \"ae11d7ca-ae3f-41f9-b510-18fde2492aa2\") " pod="glance-kuttl-tests/openstack-galera-0" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.919960 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4f3b56e1-537e-46b6-b9fa-78b735d4721c-config-data-default\") pod \"openstack-galera-2\" (UID: \"4f3b56e1-537e-46b6-b9fa-78b735d4721c\") " pod="glance-kuttl-tests/openstack-galera-2" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.920060 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a53ef3ad-6fae-4b22-a498-cb541237093d-operator-scripts\") pod \"openstack-galera-1\" (UID: \"a53ef3ad-6fae-4b22-a498-cb541237093d\") " pod="glance-kuttl-tests/openstack-galera-1" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.920113 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ae11d7ca-ae3f-41f9-b510-18fde2492aa2-config-data-default\") pod \"openstack-galera-0\" (UID: \"ae11d7ca-ae3f-41f9-b510-18fde2492aa2\") " pod="glance-kuttl-tests/openstack-galera-0" Jan 28 12:42:01 crc kubenswrapper[4685]: I0128 12:42:01.920139 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4f3b56e1-537e-46b6-b9fa-78b735d4721c-config-data-generated\") pod \"openstack-galera-2\" (UID: \"4f3b56e1-537e-46b6-b9fa-78b735d4721c\") " pod="glance-kuttl-tests/openstack-galera-2" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.021368 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7b2z\" (UniqueName: \"kubernetes.io/projected/ae11d7ca-ae3f-41f9-b510-18fde2492aa2-kube-api-access-k7b2z\") pod \"openstack-galera-0\" (UID: \"ae11d7ca-ae3f-41f9-b510-18fde2492aa2\") " pod="glance-kuttl-tests/openstack-galera-0" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.021425 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4f3b56e1-537e-46b6-b9fa-78b735d4721c-kolla-config\") pod \"openstack-galera-2\" (UID: \"4f3b56e1-537e-46b6-b9fa-78b735d4721c\") " pod="glance-kuttl-tests/openstack-galera-2" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.022124 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4f3b56e1-537e-46b6-b9fa-78b735d4721c-kolla-config\") pod \"openstack-galera-2\" (UID: \"4f3b56e1-537e-46b6-b9fa-78b735d4721c\") " pod="glance-kuttl-tests/openstack-galera-2" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.022552 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-2\" (UID: \"4f3b56e1-537e-46b6-b9fa-78b735d4721c\") device mount path \"/mnt/openstack/pv01\"" pod="glance-kuttl-tests/openstack-galera-2" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.023267 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-2\" (UID: \"4f3b56e1-537e-46b6-b9fa-78b735d4721c\") " pod="glance-kuttl-tests/openstack-galera-2" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.023369 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f3b56e1-537e-46b6-b9fa-78b735d4721c-operator-scripts\") pod \"openstack-galera-2\" (UID: \"4f3b56e1-537e-46b6-b9fa-78b735d4721c\") " pod="glance-kuttl-tests/openstack-galera-2" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.023398 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-1\" (UID: \"a53ef3ad-6fae-4b22-a498-cb541237093d\") " pod="glance-kuttl-tests/openstack-galera-1" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.023417 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae11d7ca-ae3f-41f9-b510-18fde2492aa2-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ae11d7ca-ae3f-41f9-b510-18fde2492aa2\") " pod="glance-kuttl-tests/openstack-galera-0" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.023447 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a53ef3ad-6fae-4b22-a498-cb541237093d-kolla-config\") pod \"openstack-galera-1\" (UID: \"a53ef3ad-6fae-4b22-a498-cb541237093d\") " pod="glance-kuttl-tests/openstack-galera-1" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.023477 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ae11d7ca-ae3f-41f9-b510-18fde2492aa2-kolla-config\") pod \"openstack-galera-0\" (UID: \"ae11d7ca-ae3f-41f9-b510-18fde2492aa2\") " pod="glance-kuttl-tests/openstack-galera-0" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.023514 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ae11d7ca-ae3f-41f9-b510-18fde2492aa2-config-data-generated\") pod \"openstack-galera-0\" (UID: \"ae11d7ca-ae3f-41f9-b510-18fde2492aa2\") " pod="glance-kuttl-tests/openstack-galera-0" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.023532 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmd6m\" (UniqueName: \"kubernetes.io/projected/a53ef3ad-6fae-4b22-a498-cb541237093d-kube-api-access-jmd6m\") pod \"openstack-galera-1\" (UID: \"a53ef3ad-6fae-4b22-a498-cb541237093d\") " pod="glance-kuttl-tests/openstack-galera-1" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.023560 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage16-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage16-crc\") pod \"openstack-galera-0\" (UID: \"ae11d7ca-ae3f-41f9-b510-18fde2492aa2\") " 
pod="glance-kuttl-tests/openstack-galera-0" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.023605 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4f3b56e1-537e-46b6-b9fa-78b735d4721c-config-data-default\") pod \"openstack-galera-2\" (UID: \"4f3b56e1-537e-46b6-b9fa-78b735d4721c\") " pod="glance-kuttl-tests/openstack-galera-2" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.023702 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a53ef3ad-6fae-4b22-a498-cb541237093d-operator-scripts\") pod \"openstack-galera-1\" (UID: \"a53ef3ad-6fae-4b22-a498-cb541237093d\") " pod="glance-kuttl-tests/openstack-galera-1" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.023735 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ae11d7ca-ae3f-41f9-b510-18fde2492aa2-config-data-default\") pod \"openstack-galera-0\" (UID: \"ae11d7ca-ae3f-41f9-b510-18fde2492aa2\") " pod="glance-kuttl-tests/openstack-galera-0" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.023753 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4f3b56e1-537e-46b6-b9fa-78b735d4721c-config-data-generated\") pod \"openstack-galera-2\" (UID: \"4f3b56e1-537e-46b6-b9fa-78b735d4721c\") " pod="glance-kuttl-tests/openstack-galera-2" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.023799 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a53ef3ad-6fae-4b22-a498-cb541237093d-config-data-default\") pod \"openstack-galera-1\" (UID: \"a53ef3ad-6fae-4b22-a498-cb541237093d\") " pod="glance-kuttl-tests/openstack-galera-1" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.023821 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a53ef3ad-6fae-4b22-a498-cb541237093d-config-data-generated\") pod \"openstack-galera-1\" (UID: \"a53ef3ad-6fae-4b22-a498-cb541237093d\") " pod="glance-kuttl-tests/openstack-galera-1" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.023842 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wg6m5\" (UniqueName: \"kubernetes.io/projected/4f3b56e1-537e-46b6-b9fa-78b735d4721c-kube-api-access-wg6m5\") pod \"openstack-galera-2\" (UID: \"4f3b56e1-537e-46b6-b9fa-78b735d4721c\") " pod="glance-kuttl-tests/openstack-galera-2" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.024474 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage16-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage16-crc\") pod \"openstack-galera-0\" (UID: \"ae11d7ca-ae3f-41f9-b510-18fde2492aa2\") device mount path \"/mnt/openstack/pv16\"" pod="glance-kuttl-tests/openstack-galera-0" Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.025046 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4f3b56e1-537e-46b6-b9fa-78b735d4721c-config-data-generated\") pod \"openstack-galera-2\" (UID: \"4f3b56e1-537e-46b6-b9fa-78b735d4721c\") " pod="glance-kuttl-tests/openstack-galera-2" Jan 28 
12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.025281 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-1\" (UID: \"a53ef3ad-6fae-4b22-a498-cb541237093d\") device mount path \"/mnt/openstack/pv06\"" pod="glance-kuttl-tests/openstack-galera-1"
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.025509 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ae11d7ca-ae3f-41f9-b510-18fde2492aa2-config-data-default\") pod \"openstack-galera-0\" (UID: \"ae11d7ca-ae3f-41f9-b510-18fde2492aa2\") " pod="glance-kuttl-tests/openstack-galera-0"
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.025800 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a53ef3ad-6fae-4b22-a498-cb541237093d-kolla-config\") pod \"openstack-galera-1\" (UID: \"a53ef3ad-6fae-4b22-a498-cb541237093d\") " pod="glance-kuttl-tests/openstack-galera-1"
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.025833 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a53ef3ad-6fae-4b22-a498-cb541237093d-config-data-default\") pod \"openstack-galera-1\" (UID: \"a53ef3ad-6fae-4b22-a498-cb541237093d\") " pod="glance-kuttl-tests/openstack-galera-1"
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.025962 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4f3b56e1-537e-46b6-b9fa-78b735d4721c-config-data-default\") pod \"openstack-galera-2\" (UID: \"4f3b56e1-537e-46b6-b9fa-78b735d4721c\") " pod="glance-kuttl-tests/openstack-galera-2"
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.026027 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f3b56e1-537e-46b6-b9fa-78b735d4721c-operator-scripts\") pod \"openstack-galera-2\" (UID: \"4f3b56e1-537e-46b6-b9fa-78b735d4721c\") " pod="glance-kuttl-tests/openstack-galera-2"
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.025988 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ae11d7ca-ae3f-41f9-b510-18fde2492aa2-config-data-generated\") pod \"openstack-galera-0\" (UID: \"ae11d7ca-ae3f-41f9-b510-18fde2492aa2\") " pod="glance-kuttl-tests/openstack-galera-0"
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.026624 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a53ef3ad-6fae-4b22-a498-cb541237093d-operator-scripts\") pod \"openstack-galera-1\" (UID: \"a53ef3ad-6fae-4b22-a498-cb541237093d\") " pod="glance-kuttl-tests/openstack-galera-1"
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.026642 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ae11d7ca-ae3f-41f9-b510-18fde2492aa2-kolla-config\") pod \"openstack-galera-0\" (UID: \"ae11d7ca-ae3f-41f9-b510-18fde2492aa2\") " pod="glance-kuttl-tests/openstack-galera-0"
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.027279 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae11d7ca-ae3f-41f9-b510-18fde2492aa2-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ae11d7ca-ae3f-41f9-b510-18fde2492aa2\") " pod="glance-kuttl-tests/openstack-galera-0"
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.028637 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a53ef3ad-6fae-4b22-a498-cb541237093d-config-data-generated\") pod \"openstack-galera-1\" (UID: \"a53ef3ad-6fae-4b22-a498-cb541237093d\") " pod="glance-kuttl-tests/openstack-galera-1"
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.042569 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wg6m5\" (UniqueName: \"kubernetes.io/projected/4f3b56e1-537e-46b6-b9fa-78b735d4721c-kube-api-access-wg6m5\") pod \"openstack-galera-2\" (UID: \"4f3b56e1-537e-46b6-b9fa-78b735d4721c\") " pod="glance-kuttl-tests/openstack-galera-2"
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.042573 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7b2z\" (UniqueName: \"kubernetes.io/projected/ae11d7ca-ae3f-41f9-b510-18fde2492aa2-kube-api-access-k7b2z\") pod \"openstack-galera-0\" (UID: \"ae11d7ca-ae3f-41f9-b510-18fde2492aa2\") " pod="glance-kuttl-tests/openstack-galera-0"
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.043068 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-2\" (UID: \"4f3b56e1-537e-46b6-b9fa-78b735d4721c\") " pod="glance-kuttl-tests/openstack-galera-2"
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.046078 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage16-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage16-crc\") pod \"openstack-galera-0\" (UID: \"ae11d7ca-ae3f-41f9-b510-18fde2492aa2\") " pod="glance-kuttl-tests/openstack-galera-0"
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.048162 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-1\" (UID: \"a53ef3ad-6fae-4b22-a498-cb541237093d\") " pod="glance-kuttl-tests/openstack-galera-1"
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.052103 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmd6m\" (UniqueName: \"kubernetes.io/projected/a53ef3ad-6fae-4b22-a498-cb541237093d-kube-api-access-jmd6m\") pod \"openstack-galera-1\" (UID: \"a53ef3ad-6fae-4b22-a498-cb541237093d\") " pod="glance-kuttl-tests/openstack-galera-1"
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.098704 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/openstack-galera-0"
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.120436 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/openstack-galera-1"
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.129666 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/openstack-galera-2"
Jan 28 12:42:02 crc kubenswrapper[4685]: W0128 12:42:02.529410 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f3b56e1_537e_46b6_b9fa_78b735d4721c.slice/crio-18cd56a5ffa177c04ca87d9db63f28e717e4364ee878077e1d6056e762407334 WatchSource:0}: Error finding container 18cd56a5ffa177c04ca87d9db63f28e717e4364ee878077e1d6056e762407334: Status 404 returned error can't find the container with id 18cd56a5ffa177c04ca87d9db63f28e717e4364ee878077e1d6056e762407334
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.533424 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/openstack-galera-2"]
Jan 28 12:42:02 crc kubenswrapper[4685]: W0128 12:42:02.537652 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podae11d7ca_ae3f_41f9_b510_18fde2492aa2.slice/crio-86175a0def80e32dbc605a617d1c06522bb7bf8c5c9331ffd028faaf021d3307 WatchSource:0}: Error finding container 86175a0def80e32dbc605a617d1c06522bb7bf8c5c9331ffd028faaf021d3307: Status 404 returned error can't find the container with id 86175a0def80e32dbc605a617d1c06522bb7bf8c5c9331ffd028faaf021d3307
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.538317 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/openstack-galera-0"]
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.574942 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/openstack-galera-1"]
Jan 28 12:42:02 crc kubenswrapper[4685]: W0128 12:42:02.581759 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda53ef3ad_6fae_4b22_a498_cb541237093d.slice/crio-039bedd97cd3b93a9caa471e0b17544f006c5d4884989d4462d1291ed4f918e1 WatchSource:0}: Error finding container 039bedd97cd3b93a9caa471e0b17544f006c5d4884989d4462d1291ed4f918e1: Status 404 returned error can't find the container with id 039bedd97cd3b93a9caa471e0b17544f006c5d4884989d4462d1291ed4f918e1
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.828237 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-2" event={"ID":"4f3b56e1-537e-46b6-b9fa-78b735d4721c","Type":"ContainerStarted","Data":"18cd56a5ffa177c04ca87d9db63f28e717e4364ee878077e1d6056e762407334"}
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.829685 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-1" event={"ID":"a53ef3ad-6fae-4b22-a498-cb541237093d","Type":"ContainerStarted","Data":"039bedd97cd3b93a9caa471e0b17544f006c5d4884989d4462d1291ed4f918e1"}
Jan 28 12:42:02 crc kubenswrapper[4685]: I0128 12:42:02.830847 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-0" event={"ID":"ae11d7ca-ae3f-41f9-b510-18fde2492aa2","Type":"ContainerStarted","Data":"86175a0def80e32dbc605a617d1c06522bb7bf8c5c9331ffd028faaf021d3307"}
Jan 28 12:42:03 crc kubenswrapper[4685]: I0128 12:42:03.955483 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-645c8ff456-fl4nf"
Jan 28 12:42:07 crc kubenswrapper[4685]: I0128 12:42:07.950284 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/memcached-0"]
Jan 28 12:42:07 crc kubenswrapper[4685]: I0128 12:42:07.951607 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/memcached-0"
Jan 28 12:42:07 crc kubenswrapper[4685]: I0128 12:42:07.953804 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"memcached-config-data"
Jan 28 12:42:07 crc kubenswrapper[4685]: I0128 12:42:07.955087 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"memcached-memcached-dockercfg-4gnl8"
Jan 28 12:42:07 crc kubenswrapper[4685]: I0128 12:42:07.960091 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/memcached-0"]
Jan 28 12:42:08 crc kubenswrapper[4685]: I0128 12:42:08.021425 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/298c85ee-7f53-4f03-908b-42d6773623a6-kolla-config\") pod \"memcached-0\" (UID: \"298c85ee-7f53-4f03-908b-42d6773623a6\") " pod="glance-kuttl-tests/memcached-0"
Jan 28 12:42:08 crc kubenswrapper[4685]: I0128 12:42:08.021911 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/298c85ee-7f53-4f03-908b-42d6773623a6-config-data\") pod \"memcached-0\" (UID: \"298c85ee-7f53-4f03-908b-42d6773623a6\") " pod="glance-kuttl-tests/memcached-0"
Jan 28 12:42:08 crc kubenswrapper[4685]: I0128 12:42:08.022053 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbcnp\" (UniqueName: \"kubernetes.io/projected/298c85ee-7f53-4f03-908b-42d6773623a6-kube-api-access-rbcnp\") pod \"memcached-0\" (UID: \"298c85ee-7f53-4f03-908b-42d6773623a6\") " pod="glance-kuttl-tests/memcached-0"
Jan 28 12:42:08 crc kubenswrapper[4685]: I0128 12:42:08.123208 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/298c85ee-7f53-4f03-908b-42d6773623a6-kolla-config\") pod \"memcached-0\" (UID: \"298c85ee-7f53-4f03-908b-42d6773623a6\") " pod="glance-kuttl-tests/memcached-0"
Jan 28 12:42:08 crc kubenswrapper[4685]: I0128 12:42:08.123327 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/298c85ee-7f53-4f03-908b-42d6773623a6-config-data\") pod \"memcached-0\" (UID: \"298c85ee-7f53-4f03-908b-42d6773623a6\") " pod="glance-kuttl-tests/memcached-0"
Jan 28 12:42:08 crc kubenswrapper[4685]: I0128 12:42:08.123378 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbcnp\" (UniqueName: \"kubernetes.io/projected/298c85ee-7f53-4f03-908b-42d6773623a6-kube-api-access-rbcnp\") pod \"memcached-0\" (UID: \"298c85ee-7f53-4f03-908b-42d6773623a6\") " pod="glance-kuttl-tests/memcached-0"
Jan 28 12:42:08 crc kubenswrapper[4685]: I0128 12:42:08.124141 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/298c85ee-7f53-4f03-908b-42d6773623a6-kolla-config\") pod \"memcached-0\" (UID: \"298c85ee-7f53-4f03-908b-42d6773623a6\") " pod="glance-kuttl-tests/memcached-0"
Jan 28 12:42:08 crc kubenswrapper[4685]: I0128 12:42:08.125166 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/298c85ee-7f53-4f03-908b-42d6773623a6-config-data\") pod \"memcached-0\" (UID: \"298c85ee-7f53-4f03-908b-42d6773623a6\") " pod="glance-kuttl-tests/memcached-0"
Jan 28 12:42:08 crc kubenswrapper[4685]: I0128 12:42:08.150479 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbcnp\" (UniqueName: \"kubernetes.io/projected/298c85ee-7f53-4f03-908b-42d6773623a6-kube-api-access-rbcnp\") pod \"memcached-0\" (UID: \"298c85ee-7f53-4f03-908b-42d6773623a6\") " pod="glance-kuttl-tests/memcached-0"
Jan 28 12:42:08 crc kubenswrapper[4685]: I0128 12:42:08.283029 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/memcached-0"
Jan 28 12:42:08 crc kubenswrapper[4685]: I0128 12:42:08.651809 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/memcached-0"]
Jan 28 12:42:08 crc kubenswrapper[4685]: I0128 12:42:08.895923 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/memcached-0" event={"ID":"298c85ee-7f53-4f03-908b-42d6773623a6","Type":"ContainerStarted","Data":"2868d81618b874a569c035dc20f5f98d45acdcde1e0f9f5abe91e9c710beeb2d"}
Jan 28 12:42:10 crc kubenswrapper[4685]: I0128 12:42:10.875143 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-rjcgk"]
Jan 28 12:42:10 crc kubenswrapper[4685]: I0128 12:42:10.876767 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-rjcgk"
Jan 28 12:42:10 crc kubenswrapper[4685]: I0128 12:42:10.880002 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-index-dockercfg-jttlt"
Jan 28 12:42:10 crc kubenswrapper[4685]: I0128 12:42:10.885372 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-rjcgk"]
Jan 28 12:42:10 crc kubenswrapper[4685]: I0128 12:42:10.977161 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cw5q2\" (UniqueName: \"kubernetes.io/projected/48111578-9de9-426d-ad59-572d406679db-kube-api-access-cw5q2\") pod \"rabbitmq-cluster-operator-index-rjcgk\" (UID: \"48111578-9de9-426d-ad59-572d406679db\") " pod="openstack-operators/rabbitmq-cluster-operator-index-rjcgk"
Jan 28 12:42:11 crc kubenswrapper[4685]: I0128 12:42:11.078776 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cw5q2\" (UniqueName: \"kubernetes.io/projected/48111578-9de9-426d-ad59-572d406679db-kube-api-access-cw5q2\") pod \"rabbitmq-cluster-operator-index-rjcgk\" (UID: \"48111578-9de9-426d-ad59-572d406679db\") " pod="openstack-operators/rabbitmq-cluster-operator-index-rjcgk"
Jan 28 12:42:11 crc kubenswrapper[4685]: I0128 12:42:11.099819 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cw5q2\" (UniqueName: \"kubernetes.io/projected/48111578-9de9-426d-ad59-572d406679db-kube-api-access-cw5q2\") pod \"rabbitmq-cluster-operator-index-rjcgk\" (UID: \"48111578-9de9-426d-ad59-572d406679db\") " pod="openstack-operators/rabbitmq-cluster-operator-index-rjcgk"
Jan 28 12:42:11 crc kubenswrapper[4685]: I0128 12:42:11.202113 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-rjcgk"
Jan 28 12:42:15 crc kubenswrapper[4685]: I0128 12:42:15.269770 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-rjcgk"]
Jan 28 12:42:15 crc kubenswrapper[4685]: I0128 12:42:15.875993 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-5lgtn"]
Jan 28 12:42:15 crc kubenswrapper[4685]: I0128 12:42:15.882205 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-5lgtn"
Jan 28 12:42:15 crc kubenswrapper[4685]: I0128 12:42:15.883642 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-5lgtn"]
Jan 28 12:42:15 crc kubenswrapper[4685]: I0128 12:42:15.959573 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvnxp\" (UniqueName: \"kubernetes.io/projected/e7b04b51-2455-470a-b67a-7bffd1f97b7a-kube-api-access-hvnxp\") pod \"rabbitmq-cluster-operator-index-5lgtn\" (UID: \"e7b04b51-2455-470a-b67a-7bffd1f97b7a\") " pod="openstack-operators/rabbitmq-cluster-operator-index-5lgtn"
Jan 28 12:42:16 crc kubenswrapper[4685]: I0128 12:42:16.061081 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvnxp\" (UniqueName: \"kubernetes.io/projected/e7b04b51-2455-470a-b67a-7bffd1f97b7a-kube-api-access-hvnxp\") pod \"rabbitmq-cluster-operator-index-5lgtn\" (UID: \"e7b04b51-2455-470a-b67a-7bffd1f97b7a\") " pod="openstack-operators/rabbitmq-cluster-operator-index-5lgtn"
Jan 28 12:42:16 crc kubenswrapper[4685]: I0128 12:42:16.088106 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvnxp\" (UniqueName: \"kubernetes.io/projected/e7b04b51-2455-470a-b67a-7bffd1f97b7a-kube-api-access-hvnxp\") pod \"rabbitmq-cluster-operator-index-5lgtn\" (UID: \"e7b04b51-2455-470a-b67a-7bffd1f97b7a\") " pod="openstack-operators/rabbitmq-cluster-operator-index-5lgtn"
Jan 28 12:42:16 crc kubenswrapper[4685]: I0128 12:42:16.219986 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-5lgtn"
Jan 28 12:42:17 crc kubenswrapper[4685]: I0128 12:42:17.790904 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-rjcgk"]
Jan 28 12:42:17 crc kubenswrapper[4685]: W0128 12:42:17.801992 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod48111578_9de9_426d_ad59_572d406679db.slice/crio-77153bc1af6fbd72bd14a529bbcbb5b8263345d34aee319b25080a6dbfd88944 WatchSource:0}: Error finding container 77153bc1af6fbd72bd14a529bbcbb5b8263345d34aee319b25080a6dbfd88944: Status 404 returned error can't find the container with id 77153bc1af6fbd72bd14a529bbcbb5b8263345d34aee319b25080a6dbfd88944
Jan 28 12:42:17 crc kubenswrapper[4685]: I0128 12:42:17.849642 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-5lgtn"]
Jan 28 12:42:17 crc kubenswrapper[4685]: I0128 12:42:17.969182 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-5lgtn" event={"ID":"e7b04b51-2455-470a-b67a-7bffd1f97b7a","Type":"ContainerStarted","Data":"6655f971318b35104c7f8071424d9932cb9c76562718d2a0b5190c36c2e297b7"}
Jan 28 12:42:17 crc kubenswrapper[4685]: I0128 12:42:17.970043 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-rjcgk" event={"ID":"48111578-9de9-426d-ad59-572d406679db","Type":"ContainerStarted","Data":"77153bc1af6fbd72bd14a529bbcbb5b8263345d34aee319b25080a6dbfd88944"}
Jan 28 12:42:20 crc kubenswrapper[4685]: E0128 12:42:20.381207 4685 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13"
Jan 28 12:42:20 crc kubenswrapper[4685]: E0128 12:42:20.382059 4685 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wg6m5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-2_glance-kuttl-tests(4f3b56e1-537e-46b6-b9fa-78b735d4721c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 12:42:20 crc kubenswrapper[4685]: E0128 12:42:20.383441 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="glance-kuttl-tests/openstack-galera-2" podUID="4f3b56e1-537e-46b6-b9fa-78b735d4721c"
Jan 28 12:42:20 crc kubenswrapper[4685]: I0128 12:42:20.992026 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-1" event={"ID":"a53ef3ad-6fae-4b22-a498-cb541237093d","Type":"ContainerStarted","Data":"151479befa26a52f0e8a09a67fa96ac99ee80424902227eb2bc5bd24b5759ca1"}
Jan 28 12:42:24 crc kubenswrapper[4685]: I0128 12:42:24.018631 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-0" event={"ID":"ae11d7ca-ae3f-41f9-b510-18fde2492aa2","Type":"ContainerStarted","Data":"b73768d64d5b59221842c1ef9f4861ea4d57cddbd5b127d4c2316fd5858b532b"}
Jan 28 12:42:24 crc kubenswrapper[4685]: I0128 12:42:24.021693 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-2" event={"ID":"4f3b56e1-537e-46b6-b9fa-78b735d4721c","Type":"ContainerStarted","Data":"5118fe6dc3a5bd416ff263cf4c828f678e29119e57873a9610814f050e10c9c7"}
Jan 28 12:42:29 crc kubenswrapper[4685]: I0128 12:42:29.053087 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/memcached-0" event={"ID":"298c85ee-7f53-4f03-908b-42d6773623a6","Type":"ContainerStarted","Data":"cc3177987ff4674a3b44748e4d5a2a5a229d6fe33b6ced867be9a4f420711b2e"}
Jan 28 12:42:29 crc kubenswrapper[4685]: I0128 12:42:29.053667 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/memcached-0"
Jan 28 12:42:29 crc kubenswrapper[4685]: I0128 12:42:29.077656 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/memcached-0" podStartSLOduration=2.3867419930000002 podStartE2EDuration="22.077638081s" podCreationTimestamp="2026-01-28 12:42:07 +0000 UTC" firstStartedPulling="2026-01-28 12:42:08.671947909 +0000 UTC m=+1279.759361744" lastFinishedPulling="2026-01-28 12:42:28.362843997 +0000 UTC m=+1299.450257832" observedRunningTime="2026-01-28 12:42:29.073884625 +0000 UTC m=+1300.161298470" watchObservedRunningTime="2026-01-28 12:42:29.077638081 +0000 UTC m=+1300.165051906"
Jan 28 12:42:33 crc kubenswrapper[4685]: I0128 12:42:33.284088 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/memcached-0"
Jan 28 12:42:49 crc kubenswrapper[4685]: E0128 12:42:49.681016 4685 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator-index:latest"
Jan 28 12:42:49 crc kubenswrapper[4685]: E0128 12:42:49.681770 4685 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:registry-server,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator-index:latest,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:grpc,HostPort:0,ContainerPort:50051,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hvnxp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:10,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-index-5lgtn_openstack-operators(e7b04b51-2455-470a-b67a-7bffd1f97b7a): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError"
Jan 28 12:42:49 crc kubenswrapper[4685]: E0128 12:42:49.685195 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"registry-server\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-index-5lgtn" podUID="e7b04b51-2455-470a-b67a-7bffd1f97b7a"
Jan 28 12:42:50 crc kubenswrapper[4685]: E0128 12:42:50.180968 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"registry-server\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator-index:latest\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-index-5lgtn" podUID="e7b04b51-2455-470a-b67a-7bffd1f97b7a"
Jan 28 12:42:55 crc kubenswrapper[4685]: I0128 12:42:55.211546 4685 generic.go:334] "Generic (PLEG): container finished" podID="4f3b56e1-537e-46b6-b9fa-78b735d4721c" containerID="5118fe6dc3a5bd416ff263cf4c828f678e29119e57873a9610814f050e10c9c7" exitCode=0
Jan 28 12:42:55 crc kubenswrapper[4685]: I0128 12:42:55.211655 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-2" event={"ID":"4f3b56e1-537e-46b6-b9fa-78b735d4721c","Type":"ContainerDied","Data":"5118fe6dc3a5bd416ff263cf4c828f678e29119e57873a9610814f050e10c9c7"}
Jan 28 12:42:55 crc kubenswrapper[4685]: I0128 12:42:55.214675 4685 generic.go:334] "Generic (PLEG): container finished" podID="a53ef3ad-6fae-4b22-a498-cb541237093d" containerID="151479befa26a52f0e8a09a67fa96ac99ee80424902227eb2bc5bd24b5759ca1" exitCode=0
Jan 28 12:42:55 crc kubenswrapper[4685]: I0128 12:42:55.214724 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-1" event={"ID":"a53ef3ad-6fae-4b22-a498-cb541237093d","Type":"ContainerDied","Data":"151479befa26a52f0e8a09a67fa96ac99ee80424902227eb2bc5bd24b5759ca1"}
Jan 28 12:42:55 crc kubenswrapper[4685]: I0128 12:42:55.217963 4685 generic.go:334] "Generic (PLEG): container finished" podID="ae11d7ca-ae3f-41f9-b510-18fde2492aa2" containerID="b73768d64d5b59221842c1ef9f4861ea4d57cddbd5b127d4c2316fd5858b532b" exitCode=0
Jan 28 12:42:55 crc kubenswrapper[4685]: I0128 12:42:55.218020 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-0" event={"ID":"ae11d7ca-ae3f-41f9-b510-18fde2492aa2","Type":"ContainerDied","Data":"b73768d64d5b59221842c1ef9f4861ea4d57cddbd5b127d4c2316fd5858b532b"}
Jan 28 12:42:55 crc kubenswrapper[4685]: I0128 12:42:55.220663 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-rjcgk" event={"ID":"48111578-9de9-426d-ad59-572d406679db","Type":"ContainerStarted","Data":"2a4c906494c0f1957d27007a01ca8cbde812fb301d8c0e48efd2632481a8dfd6"}
Jan 28 12:42:55 crc kubenswrapper[4685]: I0128 12:42:55.220757 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/rabbitmq-cluster-operator-index-rjcgk" podUID="48111578-9de9-426d-ad59-572d406679db" containerName="registry-server" containerID="cri-o://2a4c906494c0f1957d27007a01ca8cbde812fb301d8c0e48efd2632481a8dfd6" gracePeriod=2
Jan 28 12:42:55 crc kubenswrapper[4685]: I0128 12:42:55.310032 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-index-rjcgk" podStartSLOduration=8.955263769 podStartE2EDuration="45.310000567s" podCreationTimestamp="2026-01-28 12:42:10 +0000 UTC" firstStartedPulling="2026-01-28 12:42:17.822242323 +0000 UTC m=+1288.909656168" lastFinishedPulling="2026-01-28 12:42:54.176979131 +0000 UTC m=+1325.264392966" observedRunningTime="2026-01-28 12:42:55.304305497 +0000 UTC m=+1326.391719342" watchObservedRunningTime="2026-01-28 12:42:55.310000567 +0000 UTC m=+1326.397414402"
Jan 28 12:42:55 crc kubenswrapper[4685]: I0128 12:42:55.682890 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-rjcgk"
Jan 28 12:42:55 crc kubenswrapper[4685]: I0128 12:42:55.738946 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cw5q2\" (UniqueName: \"kubernetes.io/projected/48111578-9de9-426d-ad59-572d406679db-kube-api-access-cw5q2\") pod \"48111578-9de9-426d-ad59-572d406679db\" (UID: \"48111578-9de9-426d-ad59-572d406679db\") "
Jan 28 12:42:55 crc kubenswrapper[4685]: I0128 12:42:55.747335 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48111578-9de9-426d-ad59-572d406679db-kube-api-access-cw5q2" (OuterVolumeSpecName: "kube-api-access-cw5q2") pod "48111578-9de9-426d-ad59-572d406679db" (UID: "48111578-9de9-426d-ad59-572d406679db"). InnerVolumeSpecName "kube-api-access-cw5q2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:42:55 crc kubenswrapper[4685]: I0128 12:42:55.840275 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cw5q2\" (UniqueName: \"kubernetes.io/projected/48111578-9de9-426d-ad59-572d406679db-kube-api-access-cw5q2\") on node \"crc\" DevicePath \"\""
Jan 28 12:42:56 crc kubenswrapper[4685]: I0128 12:42:56.229781 4685 generic.go:334] "Generic (PLEG): container finished" podID="48111578-9de9-426d-ad59-572d406679db" containerID="2a4c906494c0f1957d27007a01ca8cbde812fb301d8c0e48efd2632481a8dfd6" exitCode=0
Jan 28 12:42:56 crc kubenswrapper[4685]: I0128 12:42:56.229869 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-rjcgk"
Jan 28 12:42:56 crc kubenswrapper[4685]: I0128 12:42:56.229892 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-rjcgk" event={"ID":"48111578-9de9-426d-ad59-572d406679db","Type":"ContainerDied","Data":"2a4c906494c0f1957d27007a01ca8cbde812fb301d8c0e48efd2632481a8dfd6"}
Jan 28 12:42:56 crc kubenswrapper[4685]: I0128 12:42:56.235302 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-rjcgk" event={"ID":"48111578-9de9-426d-ad59-572d406679db","Type":"ContainerDied","Data":"77153bc1af6fbd72bd14a529bbcbb5b8263345d34aee319b25080a6dbfd88944"}
Jan 28 12:42:56 crc kubenswrapper[4685]: I0128 12:42:56.235343 4685 scope.go:117] "RemoveContainer" containerID="2a4c906494c0f1957d27007a01ca8cbde812fb301d8c0e48efd2632481a8dfd6"
Jan 28 12:42:56 crc kubenswrapper[4685]: I0128 12:42:56.241379 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-2" event={"ID":"4f3b56e1-537e-46b6-b9fa-78b735d4721c","Type":"ContainerStarted","Data":"63eaef3636534823d88f357b5f886163087120c00c6dd49af2f5160aa45244ff"}
Jan 28 12:42:56 crc kubenswrapper[4685]: I0128 12:42:56.244756 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-1" event={"ID":"a53ef3ad-6fae-4b22-a498-cb541237093d","Type":"ContainerStarted","Data":"922e36f76ee1b3a6c599ecc261e1d05c75ae4b901da0cba942319d1cf0e65b64"}
Jan 28 12:42:56 crc kubenswrapper[4685]: I0128 12:42:56.250649 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstack-galera-0" event={"ID":"ae11d7ca-ae3f-41f9-b510-18fde2492aa2","Type":"ContainerStarted","Data":"8162d50277c91e60c385a51841eb8d79b5e512b9bf134ee2f96409cc1cd7ff7f"}
Jan 28 12:42:56 crc kubenswrapper[4685]: I0128 12:42:56.262613 4685 scope.go:117] "RemoveContainer" containerID="2a4c906494c0f1957d27007a01ca8cbde812fb301d8c0e48efd2632481a8dfd6"
Jan 28 12:42:56 crc kubenswrapper[4685]: E0128 12:42:56.263092 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a4c906494c0f1957d27007a01ca8cbde812fb301d8c0e48efd2632481a8dfd6\": container with ID starting with 2a4c906494c0f1957d27007a01ca8cbde812fb301d8c0e48efd2632481a8dfd6 not found: ID does not exist" containerID="2a4c906494c0f1957d27007a01ca8cbde812fb301d8c0e48efd2632481a8dfd6"
Jan 28 12:42:56 crc kubenswrapper[4685]: I0128 12:42:56.263145 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a4c906494c0f1957d27007a01ca8cbde812fb301d8c0e48efd2632481a8dfd6"} err="failed to get container status \"2a4c906494c0f1957d27007a01ca8cbde812fb301d8c0e48efd2632481a8dfd6\": rpc error: code = NotFound desc = could not find container \"2a4c906494c0f1957d27007a01ca8cbde812fb301d8c0e48efd2632481a8dfd6\": container with ID starting with 2a4c906494c0f1957d27007a01ca8cbde812fb301d8c0e48efd2632481a8dfd6 not found: ID does not exist"
Jan 28 12:42:56 crc kubenswrapper[4685]: I0128 12:42:56.274105 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/openstack-galera-2" podStartSLOduration=-9223371980.580694 podStartE2EDuration="56.274081879s" podCreationTimestamp="2026-01-28 12:42:00 +0000 UTC" firstStartedPulling="2026-01-28 12:42:02.531777324 +0000 UTC m=+1273.619191159" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:42:56.266367592 +0000 UTC m=+1327.353781487" watchObservedRunningTime="2026-01-28 12:42:56.274081879 +0000 UTC m=+1327.361495714"
Jan 28 12:42:56 crc kubenswrapper[4685]: I0128 12:42:56.286447 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-rjcgk"]
Jan 28 12:42:56 crc kubenswrapper[4685]: I0128 12:42:56.294962 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-rjcgk"]
Jan 28 12:42:56 crc kubenswrapper[4685]: I0128 12:42:56.314061 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/openstack-galera-0" podStartSLOduration=36.105246405 podStartE2EDuration="56.314033706s" podCreationTimestamp="2026-01-28 12:42:00 +0000 UTC" firstStartedPulling="2026-01-28 12:42:02.539748719 +0000 UTC m=+1273.627162574" lastFinishedPulling="2026-01-28 12:42:22.748536 +0000 UTC m=+1293.835949875" observedRunningTime="2026-01-28 12:42:56.308496689 +0000 UTC m=+1327.395910534" watchObservedRunningTime="2026-01-28 12:42:56.314033706 +0000 UTC m=+1327.401447551"
Jan 28 12:42:56 crc kubenswrapper[4685]: I0128 12:42:56.330273 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/openstack-galera-1" podStartSLOduration=39.040259248 podStartE2EDuration="56.330253413s" podCreationTimestamp="2026-01-28 12:42:00 +0000 UTC" firstStartedPulling="2026-01-28 12:42:02.585576581 +0000 UTC m=+1273.672990416" lastFinishedPulling="2026-01-28 12:42:19.875570746 +0000 UTC m=+1290.962984581" observedRunningTime="2026-01-28 12:42:56.327052513 +0000 UTC m=+1327.414466348" watchObservedRunningTime="2026-01-28 12:42:56.330253413 +0000 UTC m=+1327.417667248"
Jan 28 12:42:56 crc kubenswrapper[4685]: I0128 12:42:56.560062 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="48111578-9de9-426d-ad59-572d406679db" path="/var/lib/kubelet/pods/48111578-9de9-426d-ad59-572d406679db/volumes"
Jan 28 12:42:57 crc kubenswrapper[4685]: I0128 12:42:57.070017 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 12:42:57 crc kubenswrapper[4685]: I0128 12:42:57.070109 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 12:43:02 crc kubenswrapper[4685]: I0128 12:43:02.100041 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/openstack-galera-0"
Jan 28 12:43:02 crc kubenswrapper[4685]: I0128 12:43:02.100538 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/openstack-galera-0"
Jan 28 12:43:02 crc kubenswrapper[4685]: I0128 12:43:02.121058 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/openstack-galera-1"
Jan 28 12:43:02 crc kubenswrapper[4685]: I0128 12:43:02.121108 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/openstack-galera-1"
Jan 28 12:43:02 crc kubenswrapper[4685]: I0128 12:43:02.130264 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/openstack-galera-2"
Jan 28 12:43:02 crc kubenswrapper[4685]: I0128 12:43:02.130503 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/openstack-galera-2"
Jan 28 12:43:04 crc kubenswrapper[4685]: I0128 12:43:04.299984 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-5lgtn" event={"ID":"e7b04b51-2455-470a-b67a-7bffd1f97b7a","Type":"ContainerStarted","Data":"b7403a261f859b6fa65405b37fe1f89c010af9474d74f2ea00523010cb30356d"}
Jan 28 12:43:04 crc kubenswrapper[4685]: I0128 12:43:04.854732 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/openstack-galera-2"
Jan 28 12:43:04 crc kubenswrapper[4685]: I0128 12:43:04.920130 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="glance-kuttl-tests/openstack-galera-2" podUID="4f3b56e1-537e-46b6-b9fa-78b735d4721c" containerName="galera" probeResult="failure" output=<
Jan 28 12:43:04 crc kubenswrapper[4685]: wsrep_local_state_comment (Joined) differs from Synced
Jan 28 12:43:04 crc kubenswrapper[4685]: >
Jan 28 12:43:05 crc kubenswrapper[4685]: I0128 12:43:05.324654 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-index-5lgtn" podStartSLOduration=4.205181496 podStartE2EDuration="50.324635692s" podCreationTimestamp="2026-01-28 12:42:15 +0000 UTC" firstStartedPulling="2026-01-28 12:42:17.857790065 +0000 UTC m=+1288.945203900" lastFinishedPulling="2026-01-28 12:43:03.977244261 +0000 UTC m=+1335.064658096" observedRunningTime="2026-01-28 12:43:05.321359179 +0000 UTC m=+1336.408773014" watchObservedRunningTime="2026-01-28 12:43:05.324635692 +0000 UTC m=+1336.412049527"
Jan 28 12:43:06 crc kubenswrapper[4685]: I0128 12:43:06.220433 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/rabbitmq-cluster-operator-index-5lgtn"
Jan 28 12:43:06 crc kubenswrapper[4685]: I0128 12:43:06.220510 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/rabbitmq-cluster-operator-index-5lgtn"
Jan 28 12:43:06 crc kubenswrapper[4685]: I0128 12:43:06.249483 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/rabbitmq-cluster-operator-index-5lgtn"
Jan 28 12:43:12 crc kubenswrapper[4685]: I0128 12:43:12.195432 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/openstack-galera-2"
Jan 28 12:43:16 crc kubenswrapper[4685]: I0128 12:43:16.260268 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/rabbitmq-cluster-operator-index-5lgtn"
Jan 28 12:43:20 crc kubenswrapper[4685]: I0128 12:43:20.779698 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/root-account-create-update-2gt8g"]
Jan 28 12:43:20 crc kubenswrapper[4685]: E0128 12:43:20.780319 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48111578-9de9-426d-ad59-572d406679db" containerName="registry-server"
Jan 28 12:43:20 crc kubenswrapper[4685]: I0128 12:43:20.780335 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="48111578-9de9-426d-ad59-572d406679db" containerName="registry-server"
Jan 28 12:43:20 crc kubenswrapper[4685]: I0128 12:43:20.780470 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="48111578-9de9-426d-ad59-572d406679db" containerName="registry-server"
Jan 28 12:43:20 crc kubenswrapper[4685]: I0128 12:43:20.780955 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/root-account-create-update-2gt8g"
Jan 28 12:43:20 crc kubenswrapper[4685]: I0128 12:43:20.783099 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"openstack-mariadb-root-db-secret"
Jan 28 12:43:20 crc kubenswrapper[4685]: I0128 12:43:20.793506 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/root-account-create-update-2gt8g"]
Jan 28 12:43:20 crc kubenswrapper[4685]: I0128 12:43:20.894718 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5f94ceb-23bc-4de1-b757-bfe9cefe498b-operator-scripts\") pod \"root-account-create-update-2gt8g\" (UID: \"a5f94ceb-23bc-4de1-b757-bfe9cefe498b\") " pod="glance-kuttl-tests/root-account-create-update-2gt8g"
Jan 28 12:43:20 crc kubenswrapper[4685]: I0128 12:43:20.894825 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6dxh\" (UniqueName: \"kubernetes.io/projected/a5f94ceb-23bc-4de1-b757-bfe9cefe498b-kube-api-access-w6dxh\") pod \"root-account-create-update-2gt8g\" (UID: \"a5f94ceb-23bc-4de1-b757-bfe9cefe498b\") " pod="glance-kuttl-tests/root-account-create-update-2gt8g"
Jan 28 12:43:20 crc kubenswrapper[4685]: I0128 12:43:20.996056 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5f94ceb-23bc-4de1-b757-bfe9cefe498b-operator-scripts\") pod \"root-account-create-update-2gt8g\" (UID: \"a5f94ceb-23bc-4de1-b757-bfe9cefe498b\") " pod="glance-kuttl-tests/root-account-create-update-2gt8g"
Jan 28 12:43:20 crc kubenswrapper[4685]: I0128 12:43:20.996149 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6dxh\" (UniqueName: \"kubernetes.io/projected/a5f94ceb-23bc-4de1-b757-bfe9cefe498b-kube-api-access-w6dxh\") pod \"root-account-create-update-2gt8g\" (UID: \"a5f94ceb-23bc-4de1-b757-bfe9cefe498b\") " pod="glance-kuttl-tests/root-account-create-update-2gt8g"
Jan 28 12:43:20 crc kubenswrapper[4685]: I0128 12:43:20.996795 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5f94ceb-23bc-4de1-b757-bfe9cefe498b-operator-scripts\") pod \"root-account-create-update-2gt8g\" (UID: \"a5f94ceb-23bc-4de1-b757-bfe9cefe498b\") " pod="glance-kuttl-tests/root-account-create-update-2gt8g"
Jan 28 12:43:21 crc kubenswrapper[4685]: I0128 12:43:21.014993 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6dxh\" (UniqueName: \"kubernetes.io/projected/a5f94ceb-23bc-4de1-b757-bfe9cefe498b-kube-api-access-w6dxh\") pod \"root-account-create-update-2gt8g\" (UID: \"a5f94ceb-23bc-4de1-b757-bfe9cefe498b\") " pod="glance-kuttl-tests/root-account-create-update-2gt8g"
Jan 28 12:43:21 crc kubenswrapper[4685]: I0128 12:43:21.096467 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/root-account-create-update-2gt8g"
Jan 28 12:43:21 crc kubenswrapper[4685]: I0128 12:43:21.476653 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/root-account-create-update-2gt8g"]
Jan 28 12:43:22 crc kubenswrapper[4685]: I0128 12:43:22.223394 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="glance-kuttl-tests/openstack-galera-2" podUID="4f3b56e1-537e-46b6-b9fa-78b735d4721c" containerName="galera" probeResult="failure" output=<
Jan 28 12:43:22 crc kubenswrapper[4685]: wsrep_local_state_comment (Donor/Desynced) differs from Synced
Jan 28 12:43:22 crc kubenswrapper[4685]: >
Jan 28 12:43:22 crc kubenswrapper[4685]: I0128 12:43:22.412469 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/root-account-create-update-2gt8g" event={"ID":"a5f94ceb-23bc-4de1-b757-bfe9cefe498b","Type":"ContainerStarted","Data":"9bfa10d3c3c904d0a93fca1f49900e9776156d5ace85d98175464ee843aa4de7"}
Jan 28 12:43:22 crc kubenswrapper[4685]: I0128 12:43:22.412517 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/root-account-create-update-2gt8g" event={"ID":"a5f94ceb-23bc-4de1-b757-bfe9cefe498b","Type":"ContainerStarted","Data":"9bd658eee171daf3c3ad4a3caa817e76103850145e1406d1924c2012cab7b87b"}
Jan 28 12:43:22 crc kubenswrapper[4685]: I0128 12:43:22.430590 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/root-account-create-update-2gt8g" podStartSLOduration=2.430572106 podStartE2EDuration="2.430572106s" podCreationTimestamp="2026-01-28 12:43:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:43:22.428034645 +0000 UTC m=+1353.515448480" watchObservedRunningTime="2026-01-28 12:43:22.430572106 +0000 UTC m=+1353.517985951"
Jan 28 12:43:24 crc kubenswrapper[4685]: I0128 12:43:24.423939 4685 generic.go:334] "Generic (PLEG): container finished" podID="a5f94ceb-23bc-4de1-b757-bfe9cefe498b" containerID="9bfa10d3c3c904d0a93fca1f49900e9776156d5ace85d98175464ee843aa4de7" exitCode=0
Jan 28 12:43:24 crc kubenswrapper[4685]: I0128 12:43:24.424010 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/root-account-create-update-2gt8g" event={"ID":"a5f94ceb-23bc-4de1-b757-bfe9cefe498b","Type":"ContainerDied","Data":"9bfa10d3c3c904d0a93fca1f49900e9776156d5ace85d98175464ee843aa4de7"}
Jan 28 12:43:25 crc kubenswrapper[4685]: I0128 12:43:25.275387 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/openstack-galera-1"
Jan 28 12:43:25 crc kubenswrapper[4685]: I0128 12:43:25.338751 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/openstack-galera-1"
Jan 28 12:43:25 crc kubenswrapper[4685]: I0128 12:43:25.767864 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/root-account-create-update-2gt8g"
Jan 28 12:43:25 crc kubenswrapper[4685]: I0128 12:43:25.864643 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6dxh\" (UniqueName: \"kubernetes.io/projected/a5f94ceb-23bc-4de1-b757-bfe9cefe498b-kube-api-access-w6dxh\") pod \"a5f94ceb-23bc-4de1-b757-bfe9cefe498b\" (UID: \"a5f94ceb-23bc-4de1-b757-bfe9cefe498b\") "
Jan 28 12:43:25 crc kubenswrapper[4685]: I0128 12:43:25.864838 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5f94ceb-23bc-4de1-b757-bfe9cefe498b-operator-scripts\") pod \"a5f94ceb-23bc-4de1-b757-bfe9cefe498b\" (UID: \"a5f94ceb-23bc-4de1-b757-bfe9cefe498b\") "
Jan 28 12:43:25 crc kubenswrapper[4685]: I0128 12:43:25.865525 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5f94ceb-23bc-4de1-b757-bfe9cefe498b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a5f94ceb-23bc-4de1-b757-bfe9cefe498b" (UID: "a5f94ceb-23bc-4de1-b757-bfe9cefe498b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 12:43:25 crc kubenswrapper[4685]: I0128 12:43:25.872468 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5f94ceb-23bc-4de1-b757-bfe9cefe498b-kube-api-access-w6dxh" (OuterVolumeSpecName: "kube-api-access-w6dxh") pod "a5f94ceb-23bc-4de1-b757-bfe9cefe498b" (UID: "a5f94ceb-23bc-4de1-b757-bfe9cefe498b"). InnerVolumeSpecName "kube-api-access-w6dxh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:43:25 crc kubenswrapper[4685]: I0128 12:43:25.965809 4685 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5f94ceb-23bc-4de1-b757-bfe9cefe498b-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 12:43:25 crc kubenswrapper[4685]: I0128 12:43:25.965874 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6dxh\" (UniqueName: \"kubernetes.io/projected/a5f94ceb-23bc-4de1-b757-bfe9cefe498b-kube-api-access-w6dxh\") on node \"crc\" DevicePath \"\""
Jan 28 12:43:26 crc kubenswrapper[4685]: I0128 12:43:26.435485 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/root-account-create-update-2gt8g" event={"ID":"a5f94ceb-23bc-4de1-b757-bfe9cefe498b","Type":"ContainerDied","Data":"9bd658eee171daf3c3ad4a3caa817e76103850145e1406d1924c2012cab7b87b"}
Jan 28 12:43:26 crc kubenswrapper[4685]: I0128 12:43:26.435525 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9bd658eee171daf3c3ad4a3caa817e76103850145e1406d1924c2012cab7b87b"
Jan 28 12:43:26 crc kubenswrapper[4685]: I0128 12:43:26.435533 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/root-account-create-update-2gt8g"
Jan 28 12:43:26 crc kubenswrapper[4685]: I0128 12:43:26.780381 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/openstack-galera-0"
Jan 28 12:43:26 crc kubenswrapper[4685]: I0128 12:43:26.868697 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/openstack-galera-0"
Jan 28 12:43:27 crc kubenswrapper[4685]: I0128 12:43:27.069411 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 12:43:27 crc kubenswrapper[4685]: I0128 12:43:27.069477 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 12:43:29 crc kubenswrapper[4685]: I0128 12:43:29.017794 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz"]
Jan 28 12:43:29 crc kubenswrapper[4685]: E0128 12:43:29.018045 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5f94ceb-23bc-4de1-b757-bfe9cefe498b" containerName="mariadb-account-create-update"
Jan 28 12:43:29 crc kubenswrapper[4685]: I0128 12:43:29.018057 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5f94ceb-23bc-4de1-b757-bfe9cefe498b" containerName="mariadb-account-create-update"
Jan 28 12:43:29 crc kubenswrapper[4685]: I0128 12:43:29.018171 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5f94ceb-23bc-4de1-b757-bfe9cefe498b" containerName="mariadb-account-create-update"
Jan 28 12:43:29 crc kubenswrapper[4685]: I0128 12:43:29.019018 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz"
Jan 28 12:43:29 crc kubenswrapper[4685]: I0128 12:43:29.021557 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-dvgnb"
Jan 28 12:43:29 crc kubenswrapper[4685]: I0128 12:43:29.035798 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz"]
Jan 28 12:43:29 crc kubenswrapper[4685]: I0128 12:43:29.110081 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d4fd8a66-4af7-42c1-98e0-ed2f03d735fc-bundle\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz\" (UID: \"d4fd8a66-4af7-42c1-98e0-ed2f03d735fc\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz"
Jan 28 12:43:29 crc kubenswrapper[4685]: I0128 12:43:29.110144 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cfbc\" (UniqueName: \"kubernetes.io/projected/d4fd8a66-4af7-42c1-98e0-ed2f03d735fc-kube-api-access-9cfbc\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz\" (UID: \"d4fd8a66-4af7-42c1-98e0-ed2f03d735fc\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz"
Jan 28 12:43:29 crc kubenswrapper[4685]: I0128 12:43:29.110169 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d4fd8a66-4af7-42c1-98e0-ed2f03d735fc-util\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz\" (UID: \"d4fd8a66-4af7-42c1-98e0-ed2f03d735fc\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz"
Jan 28 12:43:29 crc kubenswrapper[4685]: I0128 12:43:29.211407 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d4fd8a66-4af7-42c1-98e0-ed2f03d735fc-bundle\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz\" (UID: \"d4fd8a66-4af7-42c1-98e0-ed2f03d735fc\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz"
Jan 28 12:43:29 crc kubenswrapper[4685]: I0128 12:43:29.211510 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cfbc\" (UniqueName: \"kubernetes.io/projected/d4fd8a66-4af7-42c1-98e0-ed2f03d735fc-kube-api-access-9cfbc\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz\" (UID: \"d4fd8a66-4af7-42c1-98e0-ed2f03d735fc\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz"
Jan 28 12:43:29 crc kubenswrapper[4685]: I0128 12:43:29.211542 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d4fd8a66-4af7-42c1-98e0-ed2f03d735fc-util\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz\" (UID: \"d4fd8a66-4af7-42c1-98e0-ed2f03d735fc\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz"
Jan 28 12:43:29 crc kubenswrapper[4685]: I0128 12:43:29.212172 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d4fd8a66-4af7-42c1-98e0-ed2f03d735fc-util\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz\" (UID: \"d4fd8a66-4af7-42c1-98e0-ed2f03d735fc\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz"
Jan 28 12:43:29 crc kubenswrapper[4685]: I0128 12:43:29.212165 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d4fd8a66-4af7-42c1-98e0-ed2f03d735fc-bundle\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz\" (UID: \"d4fd8a66-4af7-42c1-98e0-ed2f03d735fc\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz"
Jan 28 12:43:29 crc kubenswrapper[4685]: I0128 12:43:29.231804 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cfbc\" (UniqueName: \"kubernetes.io/projected/d4fd8a66-4af7-42c1-98e0-ed2f03d735fc-kube-api-access-9cfbc\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz\" (UID: \"d4fd8a66-4af7-42c1-98e0-ed2f03d735fc\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz"
Jan 28 12:43:29 crc kubenswrapper[4685]: I0128 12:43:29.336464 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz"
Jan 28 12:43:29 crc kubenswrapper[4685]: I0128 12:43:29.787477 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz"]
Jan 28 12:43:30 crc kubenswrapper[4685]: I0128 12:43:30.462941 4685 generic.go:334] "Generic (PLEG): container finished" podID="d4fd8a66-4af7-42c1-98e0-ed2f03d735fc" containerID="236f39c9b6062ba2268a786dff1624ccf722978d36a785e5d3a0129ec3ad700a" exitCode=0
Jan 28 12:43:30 crc kubenswrapper[4685]: I0128 12:43:30.462988 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz" event={"ID":"d4fd8a66-4af7-42c1-98e0-ed2f03d735fc","Type":"ContainerDied","Data":"236f39c9b6062ba2268a786dff1624ccf722978d36a785e5d3a0129ec3ad700a"}
Jan 28 12:43:30 crc kubenswrapper[4685]: I0128 12:43:30.463018 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz" event={"ID":"d4fd8a66-4af7-42c1-98e0-ed2f03d735fc","Type":"ContainerStarted","Data":"6647b136885e979195257b82f797ef5bd9528528e141433bdac164a99ee55b49"}
Jan 28 12:43:30 crc kubenswrapper[4685]: I0128 12:43:30.465032 4685 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 28 12:43:31 crc kubenswrapper[4685]: I0128 12:43:31.473326 4685 generic.go:334] "Generic (PLEG): container finished" podID="d4fd8a66-4af7-42c1-98e0-ed2f03d735fc" containerID="c02e4eba55bdacb3bea8f55928712b5b834fa424fadb15c42e8674bc4286cc69" exitCode=0
Jan 28 12:43:31 crc kubenswrapper[4685]: I0128 12:43:31.473401 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz" event={"ID":"d4fd8a66-4af7-42c1-98e0-ed2f03d735fc","Type":"ContainerDied","Data":"c02e4eba55bdacb3bea8f55928712b5b834fa424fadb15c42e8674bc4286cc69"}
Jan 28 12:43:32 crc kubenswrapper[4685]: I0128 12:43:32.484395 4685 generic.go:334] "Generic (PLEG): container finished" podID="d4fd8a66-4af7-42c1-98e0-ed2f03d735fc" containerID="6c7054d34480e49032d63b2104e40a889a319265f1239071e76f2b878b686467" exitCode=0
Jan 28 12:43:32 crc kubenswrapper[4685]: I0128 12:43:32.484459 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz" event={"ID":"d4fd8a66-4af7-42c1-98e0-ed2f03d735fc","Type":"ContainerDied","Data":"6c7054d34480e49032d63b2104e40a889a319265f1239071e76f2b878b686467"}
Jan 28 12:43:33 crc kubenswrapper[4685]: I0128 12:43:33.788597 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz"
Jan 28 12:43:33 crc kubenswrapper[4685]: I0128 12:43:33.879786 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d4fd8a66-4af7-42c1-98e0-ed2f03d735fc-util\") pod \"d4fd8a66-4af7-42c1-98e0-ed2f03d735fc\" (UID: \"d4fd8a66-4af7-42c1-98e0-ed2f03d735fc\") "
Jan 28 12:43:33 crc kubenswrapper[4685]: I0128 12:43:33.879883 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d4fd8a66-4af7-42c1-98e0-ed2f03d735fc-bundle\") pod \"d4fd8a66-4af7-42c1-98e0-ed2f03d735fc\" (UID: \"d4fd8a66-4af7-42c1-98e0-ed2f03d735fc\") "
Jan 28 12:43:33 crc kubenswrapper[4685]: I0128 12:43:33.879944 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9cfbc\" (UniqueName: \"kubernetes.io/projected/d4fd8a66-4af7-42c1-98e0-ed2f03d735fc-kube-api-access-9cfbc\") pod \"d4fd8a66-4af7-42c1-98e0-ed2f03d735fc\" (UID: \"d4fd8a66-4af7-42c1-98e0-ed2f03d735fc\") "
Jan 28 12:43:33 crc kubenswrapper[4685]: I0128 12:43:33.881704 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4fd8a66-4af7-42c1-98e0-ed2f03d735fc-bundle" (OuterVolumeSpecName: "bundle") pod "d4fd8a66-4af7-42c1-98e0-ed2f03d735fc" (UID: "d4fd8a66-4af7-42c1-98e0-ed2f03d735fc"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 12:43:33 crc kubenswrapper[4685]: I0128 12:43:33.886133 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4fd8a66-4af7-42c1-98e0-ed2f03d735fc-kube-api-access-9cfbc" (OuterVolumeSpecName: "kube-api-access-9cfbc") pod "d4fd8a66-4af7-42c1-98e0-ed2f03d735fc" (UID: "d4fd8a66-4af7-42c1-98e0-ed2f03d735fc"). InnerVolumeSpecName "kube-api-access-9cfbc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:43:33 crc kubenswrapper[4685]: I0128 12:43:33.896931 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4fd8a66-4af7-42c1-98e0-ed2f03d735fc-util" (OuterVolumeSpecName: "util") pod "d4fd8a66-4af7-42c1-98e0-ed2f03d735fc" (UID: "d4fd8a66-4af7-42c1-98e0-ed2f03d735fc"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 12:43:33 crc kubenswrapper[4685]: I0128 12:43:33.982079 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9cfbc\" (UniqueName: \"kubernetes.io/projected/d4fd8a66-4af7-42c1-98e0-ed2f03d735fc-kube-api-access-9cfbc\") on node \"crc\" DevicePath \"\""
Jan 28 12:43:33 crc kubenswrapper[4685]: I0128 12:43:33.982147 4685 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d4fd8a66-4af7-42c1-98e0-ed2f03d735fc-util\") on node \"crc\" DevicePath \"\""
Jan 28 12:43:33 crc kubenswrapper[4685]: I0128 12:43:33.982165 4685 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d4fd8a66-4af7-42c1-98e0-ed2f03d735fc-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 12:43:34 crc kubenswrapper[4685]: I0128 12:43:34.498687 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz" event={"ID":"d4fd8a66-4af7-42c1-98e0-ed2f03d735fc","Type":"ContainerDied","Data":"6647b136885e979195257b82f797ef5bd9528528e141433bdac164a99ee55b49"}
Jan 28 12:43:34 crc kubenswrapper[4685]: I0128 12:43:34.498726 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz"
Jan 28 12:43:34 crc kubenswrapper[4685]: I0128 12:43:34.498732 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6647b136885e979195257b82f797ef5bd9528528e141433bdac164a99ee55b49"
Jan 28 12:43:43 crc kubenswrapper[4685]: I0128 12:43:43.317189 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-xtpr9"]
Jan 28 12:43:43 crc kubenswrapper[4685]: E0128 12:43:43.318023 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4fd8a66-4af7-42c1-98e0-ed2f03d735fc" containerName="extract"
Jan 28 12:43:43 crc kubenswrapper[4685]: I0128 12:43:43.318039 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4fd8a66-4af7-42c1-98e0-ed2f03d735fc" containerName="extract"
Jan 28 12:43:43 crc kubenswrapper[4685]: E0128 12:43:43.318048 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4fd8a66-4af7-42c1-98e0-ed2f03d735fc" containerName="util"
Jan 28 12:43:43 crc kubenswrapper[4685]: I0128 12:43:43.318055 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4fd8a66-4af7-42c1-98e0-ed2f03d735fc" containerName="util"
Jan 28 12:43:43 crc kubenswrapper[4685]: E0128 12:43:43.318067 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4fd8a66-4af7-42c1-98e0-ed2f03d735fc" containerName="pull"
Jan 28 12:43:43 crc kubenswrapper[4685]: I0128 12:43:43.318074 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4fd8a66-4af7-42c1-98e0-ed2f03d735fc" containerName="pull"
Jan 28 12:43:43 crc kubenswrapper[4685]: I0128 12:43:43.318239 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4fd8a66-4af7-42c1-98e0-ed2f03d735fc" containerName="extract"
Jan 28 12:43:43 crc kubenswrapper[4685]: I0128 12:43:43.318742 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-xtpr9"
Jan 28 12:43:43 crc kubenswrapper[4685]: I0128 12:43:43.321211 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-dockercfg-s5hqk"
Jan 28 12:43:43 crc kubenswrapper[4685]: I0128 12:43:43.329469 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-xtpr9"]
Jan 28 12:43:43 crc kubenswrapper[4685]: I0128 12:43:43.516656 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgkk7\" (UniqueName: \"kubernetes.io/projected/dd150285-6b2b-446f-9983-b207a55d160d-kube-api-access-kgkk7\") pod \"rabbitmq-cluster-operator-779fc9694b-xtpr9\" (UID: \"dd150285-6b2b-446f-9983-b207a55d160d\") " pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-xtpr9"
Jan 28 12:43:43 crc kubenswrapper[4685]: I0128 12:43:43.618485 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgkk7\" (UniqueName: \"kubernetes.io/projected/dd150285-6b2b-446f-9983-b207a55d160d-kube-api-access-kgkk7\") pod \"rabbitmq-cluster-operator-779fc9694b-xtpr9\" (UID: \"dd150285-6b2b-446f-9983-b207a55d160d\") " pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-xtpr9"
Jan 28 12:43:43 crc kubenswrapper[4685]: I0128 12:43:43.637962 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgkk7\" (UniqueName: \"kubernetes.io/projected/dd150285-6b2b-446f-9983-b207a55d160d-kube-api-access-kgkk7\") pod \"rabbitmq-cluster-operator-779fc9694b-xtpr9\" (UID: \"dd150285-6b2b-446f-9983-b207a55d160d\") " pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-xtpr9"
Jan 28 12:43:43 crc kubenswrapper[4685]: I0128 12:43:43.676833 4685 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-xtpr9" Jan 28 12:43:43 crc kubenswrapper[4685]: I0128 12:43:43.861921 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-xtpr9"] Jan 28 12:43:44 crc kubenswrapper[4685]: I0128 12:43:44.561241 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-xtpr9" event={"ID":"dd150285-6b2b-446f-9983-b207a55d160d","Type":"ContainerStarted","Data":"3554b7baf6432ac5b292a0cf750365d7366bbfdb8e4c471afd06afd099c20da7"} Jan 28 12:43:48 crc kubenswrapper[4685]: I0128 12:43:48.602601 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-xtpr9" event={"ID":"dd150285-6b2b-446f-9983-b207a55d160d","Type":"ContainerStarted","Data":"14b5c62830c23d3b971df52fa93be79bc50747dc64962949d6b52fc4f1de28e0"} Jan 28 12:43:48 crc kubenswrapper[4685]: I0128 12:43:48.621685 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-xtpr9" podStartSLOduration=1.244806205 podStartE2EDuration="5.621666446s" podCreationTimestamp="2026-01-28 12:43:43 +0000 UTC" firstStartedPulling="2026-01-28 12:43:43.876923139 +0000 UTC m=+1374.964336974" lastFinishedPulling="2026-01-28 12:43:48.25378338 +0000 UTC m=+1379.341197215" observedRunningTime="2026-01-28 12:43:48.617816109 +0000 UTC m=+1379.705229944" watchObservedRunningTime="2026-01-28 12:43:48.621666446 +0000 UTC m=+1379.709080281" Jan 28 12:43:52 crc kubenswrapper[4685]: I0128 12:43:52.861735 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/rabbitmq-server-0"] Jan 28 12:43:52 crc kubenswrapper[4685]: I0128 12:43:52.863304 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:52 crc kubenswrapper[4685]: I0128 12:43:52.868558 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"rabbitmq-server-conf" Jan 28 12:43:52 crc kubenswrapper[4685]: I0128 12:43:52.868592 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"rabbitmq-plugins-conf" Jan 28 12:43:52 crc kubenswrapper[4685]: I0128 12:43:52.868749 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"rabbitmq-erlang-cookie" Jan 28 12:43:52 crc kubenswrapper[4685]: I0128 12:43:52.868752 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"rabbitmq-default-user" Jan 28 12:43:52 crc kubenswrapper[4685]: I0128 12:43:52.871094 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"rabbitmq-server-dockercfg-2c67s" Jan 28 12:43:52 crc kubenswrapper[4685]: I0128 12:43:52.874819 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/rabbitmq-server-0"] Jan 28 12:43:52 crc kubenswrapper[4685]: I0128 12:43:52.964099 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:52 crc kubenswrapper[4685]: I0128 12:43:52.964159 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:52 crc kubenswrapper[4685]: I0128 12:43:52.964207 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f7044aa0-c688-4e0d-83bb-8a420762baa7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f7044aa0-c688-4e0d-83bb-8a420762baa7\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:52 crc kubenswrapper[4685]: I0128 12:43:52.964235 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:52 crc kubenswrapper[4685]: I0128 12:43:52.964252 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lnnc8\" (UniqueName: \"kubernetes.io/projected/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-kube-api-access-lnnc8\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:52 crc kubenswrapper[4685]: I0128 12:43:52.964270 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " 
pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:52 crc kubenswrapper[4685]: I0128 12:43:52.964293 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:52 crc kubenswrapper[4685]: I0128 12:43:52.964313 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:53 crc kubenswrapper[4685]: I0128 12:43:53.065550 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:53 crc kubenswrapper[4685]: I0128 12:43:53.065923 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f7044aa0-c688-4e0d-83bb-8a420762baa7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f7044aa0-c688-4e0d-83bb-8a420762baa7\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:53 crc kubenswrapper[4685]: I0128 12:43:53.065965 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:53 crc kubenswrapper[4685]: I0128 12:43:53.065991 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lnnc8\" (UniqueName: \"kubernetes.io/projected/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-kube-api-access-lnnc8\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:53 crc kubenswrapper[4685]: I0128 12:43:53.066014 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:53 crc kubenswrapper[4685]: I0128 12:43:53.066039 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:53 crc kubenswrapper[4685]: I0128 12:43:53.066064 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:53 crc 
kubenswrapper[4685]: I0128 12:43:53.066116 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:53 crc kubenswrapper[4685]: I0128 12:43:53.066671 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:53 crc kubenswrapper[4685]: I0128 12:43:53.066973 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:53 crc kubenswrapper[4685]: I0128 12:43:53.068053 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:53 crc kubenswrapper[4685]: I0128 12:43:53.077009 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:53 crc kubenswrapper[4685]: I0128 12:43:53.077956 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:53 crc kubenswrapper[4685]: I0128 12:43:53.081745 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:53 crc kubenswrapper[4685]: I0128 12:43:53.085099 4685 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 28 12:43:53 crc kubenswrapper[4685]: I0128 12:43:53.085144 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f7044aa0-c688-4e0d-83bb-8a420762baa7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f7044aa0-c688-4e0d-83bb-8a420762baa7\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e933a47d5c7d00ca8ec378989a4692113f7169258bddb0199773b98c8a78f97a/globalmount\"" pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:53 crc kubenswrapper[4685]: I0128 12:43:53.087675 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lnnc8\" (UniqueName: \"kubernetes.io/projected/b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb-kube-api-access-lnnc8\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:53 crc kubenswrapper[4685]: I0128 12:43:53.109382 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f7044aa0-c688-4e0d-83bb-8a420762baa7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f7044aa0-c688-4e0d-83bb-8a420762baa7\") pod \"rabbitmq-server-0\" (UID: \"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb\") " pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:53 crc kubenswrapper[4685]: I0128 12:43:53.186722 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:43:53 crc kubenswrapper[4685]: I0128 12:43:53.602756 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/rabbitmq-server-0"] Jan 28 12:43:53 crc kubenswrapper[4685]: I0128 12:43:53.637724 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/rabbitmq-server-0" event={"ID":"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb","Type":"ContainerStarted","Data":"f8b6ac68f1879c24bcad6dc1d0f01e2053ebd75f639523c6012cf119ce467f14"} Jan 28 12:43:57 crc kubenswrapper[4685]: I0128 12:43:57.069572 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:43:57 crc kubenswrapper[4685]: I0128 12:43:57.069963 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:43:57 crc kubenswrapper[4685]: I0128 12:43:57.070023 4685 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:43:57 crc kubenswrapper[4685]: I0128 12:43:57.070765 4685 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fe6797eb1526f7a98dbf830c75b37ec963fdbf0115e31ebdafc20e877843581c"} pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 12:43:57 crc kubenswrapper[4685]: I0128 12:43:57.070846 4685 kuberuntime_container.go:808] "Killing container with a 
grace period" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" containerID="cri-o://fe6797eb1526f7a98dbf830c75b37ec963fdbf0115e31ebdafc20e877843581c" gracePeriod=600 Jan 28 12:43:58 crc kubenswrapper[4685]: I0128 12:43:58.670704 4685 generic.go:334] "Generic (PLEG): container finished" podID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerID="fe6797eb1526f7a98dbf830c75b37ec963fdbf0115e31ebdafc20e877843581c" exitCode=0 Jan 28 12:43:58 crc kubenswrapper[4685]: I0128 12:43:58.670757 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerDied","Data":"fe6797eb1526f7a98dbf830c75b37ec963fdbf0115e31ebdafc20e877843581c"} Jan 28 12:43:58 crc kubenswrapper[4685]: I0128 12:43:58.671094 4685 scope.go:117] "RemoveContainer" containerID="62facba6395aa0c2d16f630a44d637954256b42216e7c75dfe642554c0dcdb37" Jan 28 12:43:59 crc kubenswrapper[4685]: I0128 12:43:59.681396 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerStarted","Data":"8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65"} Jan 28 12:43:59 crc kubenswrapper[4685]: I0128 12:43:59.791543 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-index-5m4xf"] Jan 28 12:43:59 crc kubenswrapper[4685]: I0128 12:43:59.792606 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-index-5m4xf" Jan 28 12:43:59 crc kubenswrapper[4685]: I0128 12:43:59.794974 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-index-dockercfg-jzh5q" Jan 28 12:43:59 crc kubenswrapper[4685]: I0128 12:43:59.798461 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-index-5m4xf"] Jan 28 12:43:59 crc kubenswrapper[4685]: I0128 12:43:59.960739 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5g54\" (UniqueName: \"kubernetes.io/projected/cd4ba4c3-6264-4365-a295-68b73a5efa21-kube-api-access-b5g54\") pod \"keystone-operator-index-5m4xf\" (UID: \"cd4ba4c3-6264-4365-a295-68b73a5efa21\") " pod="openstack-operators/keystone-operator-index-5m4xf" Jan 28 12:44:00 crc kubenswrapper[4685]: I0128 12:44:00.062926 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5g54\" (UniqueName: \"kubernetes.io/projected/cd4ba4c3-6264-4365-a295-68b73a5efa21-kube-api-access-b5g54\") pod \"keystone-operator-index-5m4xf\" (UID: \"cd4ba4c3-6264-4365-a295-68b73a5efa21\") " pod="openstack-operators/keystone-operator-index-5m4xf" Jan 28 12:44:00 crc kubenswrapper[4685]: I0128 12:44:00.084914 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5g54\" (UniqueName: \"kubernetes.io/projected/cd4ba4c3-6264-4365-a295-68b73a5efa21-kube-api-access-b5g54\") pod \"keystone-operator-index-5m4xf\" (UID: \"cd4ba4c3-6264-4365-a295-68b73a5efa21\") " pod="openstack-operators/keystone-operator-index-5m4xf" Jan 28 12:44:00 crc kubenswrapper[4685]: I0128 12:44:00.113397 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-index-5m4xf" Jan 28 12:44:00 crc kubenswrapper[4685]: I0128 12:44:00.294804 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-index-5m4xf"] Jan 28 12:44:00 crc kubenswrapper[4685]: W0128 12:44:00.310521 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcd4ba4c3_6264_4365_a295_68b73a5efa21.slice/crio-a1d63d26e0f5751caf668055a9e0cfbf3ee3dba30a1195f920d05c6ab4ca36cb WatchSource:0}: Error finding container a1d63d26e0f5751caf668055a9e0cfbf3ee3dba30a1195f920d05c6ab4ca36cb: Status 404 returned error can't find the container with id a1d63d26e0f5751caf668055a9e0cfbf3ee3dba30a1195f920d05c6ab4ca36cb Jan 28 12:44:00 crc kubenswrapper[4685]: I0128 12:44:00.692921 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-index-5m4xf" event={"ID":"cd4ba4c3-6264-4365-a295-68b73a5efa21","Type":"ContainerStarted","Data":"a1d63d26e0f5751caf668055a9e0cfbf3ee3dba30a1195f920d05c6ab4ca36cb"} Jan 28 12:44:06 crc kubenswrapper[4685]: I0128 12:44:06.728337 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-index-5m4xf" event={"ID":"cd4ba4c3-6264-4365-a295-68b73a5efa21","Type":"ContainerStarted","Data":"9386a406a94fdf05155eb83264f8632d4b290e0fed310a670869a6a422e5e78d"} Jan 28 12:44:06 crc kubenswrapper[4685]: I0128 12:44:06.745332 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-index-5m4xf" podStartSLOduration=2.334155595 podStartE2EDuration="7.745306944s" podCreationTimestamp="2026-01-28 12:43:59 +0000 UTC" firstStartedPulling="2026-01-28 12:44:00.313730833 +0000 UTC m=+1391.401144668" lastFinishedPulling="2026-01-28 12:44:05.724882182 +0000 UTC m=+1396.812296017" observedRunningTime="2026-01-28 12:44:06.740556532 +0000 UTC m=+1397.827970387" watchObservedRunningTime="2026-01-28 12:44:06.745306944 +0000 UTC m=+1397.832720799" Jan 28 12:44:07 crc kubenswrapper[4685]: I0128 12:44:07.747566 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/rabbitmq-server-0" event={"ID":"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb","Type":"ContainerStarted","Data":"7080014cac2b634408350e36c628719518c5cd21147cb32713c4e2dff3d3d110"} Jan 28 12:44:10 crc kubenswrapper[4685]: I0128 12:44:10.114822 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/keystone-operator-index-5m4xf" Jan 28 12:44:10 crc kubenswrapper[4685]: I0128 12:44:10.115422 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-index-5m4xf" Jan 28 12:44:10 crc kubenswrapper[4685]: I0128 12:44:10.155841 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/keystone-operator-index-5m4xf" Jan 28 12:44:10 crc kubenswrapper[4685]: I0128 12:44:10.806122 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-index-5m4xf" Jan 28 12:44:19 crc kubenswrapper[4685]: I0128 12:44:19.627488 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd"] Jan 28 12:44:19 crc kubenswrapper[4685]: I0128 12:44:19.629350 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd" Jan 28 12:44:19 crc kubenswrapper[4685]: I0128 12:44:19.636978 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-dvgnb" Jan 28 12:44:19 crc kubenswrapper[4685]: I0128 12:44:19.643517 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd"] Jan 28 12:44:19 crc kubenswrapper[4685]: I0128 12:44:19.759915 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/28604166-5316-49b0-acf2-c59881d7d29c-bundle\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd\" (UID: \"28604166-5316-49b0-acf2-c59881d7d29c\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd" Jan 28 12:44:19 crc kubenswrapper[4685]: I0128 12:44:19.759997 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9x4j\" (UniqueName: \"kubernetes.io/projected/28604166-5316-49b0-acf2-c59881d7d29c-kube-api-access-l9x4j\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd\" (UID: \"28604166-5316-49b0-acf2-c59881d7d29c\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd" Jan 28 12:44:19 crc kubenswrapper[4685]: I0128 12:44:19.760026 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/28604166-5316-49b0-acf2-c59881d7d29c-util\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd\" (UID: \"28604166-5316-49b0-acf2-c59881d7d29c\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd" Jan 28 12:44:19 crc kubenswrapper[4685]: I0128 12:44:19.861265 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/28604166-5316-49b0-acf2-c59881d7d29c-bundle\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd\" (UID: \"28604166-5316-49b0-acf2-c59881d7d29c\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd" Jan 28 12:44:19 crc kubenswrapper[4685]: I0128 12:44:19.861323 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9x4j\" (UniqueName: \"kubernetes.io/projected/28604166-5316-49b0-acf2-c59881d7d29c-kube-api-access-l9x4j\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd\" (UID: \"28604166-5316-49b0-acf2-c59881d7d29c\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd" Jan 28 12:44:19 crc kubenswrapper[4685]: I0128 12:44:19.861357 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/28604166-5316-49b0-acf2-c59881d7d29c-util\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd\" (UID: \"28604166-5316-49b0-acf2-c59881d7d29c\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd" Jan 28 12:44:19 crc kubenswrapper[4685]: I0128 12:44:19.862019 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/28604166-5316-49b0-acf2-c59881d7d29c-util\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd\" (UID: \"28604166-5316-49b0-acf2-c59881d7d29c\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd" Jan 28 12:44:19 crc kubenswrapper[4685]: I0128 12:44:19.862026 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/28604166-5316-49b0-acf2-c59881d7d29c-bundle\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd\" (UID: \"28604166-5316-49b0-acf2-c59881d7d29c\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd" Jan 28 12:44:19 crc kubenswrapper[4685]: I0128 12:44:19.886407 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9x4j\" (UniqueName: \"kubernetes.io/projected/28604166-5316-49b0-acf2-c59881d7d29c-kube-api-access-l9x4j\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd\" (UID: \"28604166-5316-49b0-acf2-c59881d7d29c\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd" Jan 28 12:44:19 crc kubenswrapper[4685]: I0128 12:44:19.955333 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd" Jan 28 12:44:20 crc kubenswrapper[4685]: I0128 12:44:20.180586 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd"] Jan 28 12:44:20 crc kubenswrapper[4685]: W0128 12:44:20.207483 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod28604166_5316_49b0_acf2_c59881d7d29c.slice/crio-1a3fc23ba58bda798e52748b3ddc0ce36a6d999a2399bcd46f2e3c80efa48157 WatchSource:0}: Error finding container 1a3fc23ba58bda798e52748b3ddc0ce36a6d999a2399bcd46f2e3c80efa48157: Status 404 returned error can't find the container with id 1a3fc23ba58bda798e52748b3ddc0ce36a6d999a2399bcd46f2e3c80efa48157 Jan 28 12:44:20 crc kubenswrapper[4685]: I0128 12:44:20.850928 4685 generic.go:334] "Generic (PLEG): container finished" podID="28604166-5316-49b0-acf2-c59881d7d29c" containerID="0b252afdc2a3f760df61afd4f982d0aac9d04f4e8721d643f498757c6f366430" exitCode=0 Jan 28 12:44:20 crc kubenswrapper[4685]: I0128 12:44:20.850975 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd" event={"ID":"28604166-5316-49b0-acf2-c59881d7d29c","Type":"ContainerDied","Data":"0b252afdc2a3f760df61afd4f982d0aac9d04f4e8721d643f498757c6f366430"} Jan 28 12:44:20 crc kubenswrapper[4685]: I0128 12:44:20.851014 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd" event={"ID":"28604166-5316-49b0-acf2-c59881d7d29c","Type":"ContainerStarted","Data":"1a3fc23ba58bda798e52748b3ddc0ce36a6d999a2399bcd46f2e3c80efa48157"} Jan 28 12:44:25 crc kubenswrapper[4685]: I0128 12:44:25.885208 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd" event={"ID":"28604166-5316-49b0-acf2-c59881d7d29c","Type":"ContainerStarted","Data":"5a81e334b056c5e0c144d84eef8300c3d8841cdf6b444935c7b81af25b4bb003"} Jan 28 12:44:26 crc kubenswrapper[4685]: 
I0128 12:44:26.893286 4685 generic.go:334] "Generic (PLEG): container finished" podID="28604166-5316-49b0-acf2-c59881d7d29c" containerID="5a81e334b056c5e0c144d84eef8300c3d8841cdf6b444935c7b81af25b4bb003" exitCode=0 Jan 28 12:44:26 crc kubenswrapper[4685]: I0128 12:44:26.893334 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd" event={"ID":"28604166-5316-49b0-acf2-c59881d7d29c","Type":"ContainerDied","Data":"5a81e334b056c5e0c144d84eef8300c3d8841cdf6b444935c7b81af25b4bb003"} Jan 28 12:44:28 crc kubenswrapper[4685]: I0128 12:44:28.909439 4685 generic.go:334] "Generic (PLEG): container finished" podID="28604166-5316-49b0-acf2-c59881d7d29c" containerID="2b50ffdfb9939037c629dd780da548b88ecaf140aaa006d667dee43dbc8e4e28" exitCode=0 Jan 28 12:44:28 crc kubenswrapper[4685]: I0128 12:44:28.909494 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd" event={"ID":"28604166-5316-49b0-acf2-c59881d7d29c","Type":"ContainerDied","Data":"2b50ffdfb9939037c629dd780da548b88ecaf140aaa006d667dee43dbc8e4e28"} Jan 28 12:44:30 crc kubenswrapper[4685]: I0128 12:44:30.225781 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd" Jan 28 12:44:30 crc kubenswrapper[4685]: I0128 12:44:30.325645 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/28604166-5316-49b0-acf2-c59881d7d29c-util\") pod \"28604166-5316-49b0-acf2-c59881d7d29c\" (UID: \"28604166-5316-49b0-acf2-c59881d7d29c\") " Jan 28 12:44:30 crc kubenswrapper[4685]: I0128 12:44:30.326343 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/28604166-5316-49b0-acf2-c59881d7d29c-bundle\") pod \"28604166-5316-49b0-acf2-c59881d7d29c\" (UID: \"28604166-5316-49b0-acf2-c59881d7d29c\") " Jan 28 12:44:30 crc kubenswrapper[4685]: I0128 12:44:30.326478 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9x4j\" (UniqueName: \"kubernetes.io/projected/28604166-5316-49b0-acf2-c59881d7d29c-kube-api-access-l9x4j\") pod \"28604166-5316-49b0-acf2-c59881d7d29c\" (UID: \"28604166-5316-49b0-acf2-c59881d7d29c\") " Jan 28 12:44:30 crc kubenswrapper[4685]: I0128 12:44:30.327162 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28604166-5316-49b0-acf2-c59881d7d29c-bundle" (OuterVolumeSpecName: "bundle") pod "28604166-5316-49b0-acf2-c59881d7d29c" (UID: "28604166-5316-49b0-acf2-c59881d7d29c"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:44:30 crc kubenswrapper[4685]: I0128 12:44:30.334565 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28604166-5316-49b0-acf2-c59881d7d29c-kube-api-access-l9x4j" (OuterVolumeSpecName: "kube-api-access-l9x4j") pod "28604166-5316-49b0-acf2-c59881d7d29c" (UID: "28604166-5316-49b0-acf2-c59881d7d29c"). InnerVolumeSpecName "kube-api-access-l9x4j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:44:30 crc kubenswrapper[4685]: I0128 12:44:30.338163 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28604166-5316-49b0-acf2-c59881d7d29c-util" (OuterVolumeSpecName: "util") pod "28604166-5316-49b0-acf2-c59881d7d29c" (UID: "28604166-5316-49b0-acf2-c59881d7d29c"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:44:30 crc kubenswrapper[4685]: I0128 12:44:30.427557 4685 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/28604166-5316-49b0-acf2-c59881d7d29c-util\") on node \"crc\" DevicePath \"\"" Jan 28 12:44:30 crc kubenswrapper[4685]: I0128 12:44:30.427591 4685 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/28604166-5316-49b0-acf2-c59881d7d29c-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:44:30 crc kubenswrapper[4685]: I0128 12:44:30.427601 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l9x4j\" (UniqueName: \"kubernetes.io/projected/28604166-5316-49b0-acf2-c59881d7d29c-kube-api-access-l9x4j\") on node \"crc\" DevicePath \"\"" Jan 28 12:44:30 crc kubenswrapper[4685]: I0128 12:44:30.927060 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd" event={"ID":"28604166-5316-49b0-acf2-c59881d7d29c","Type":"ContainerDied","Data":"1a3fc23ba58bda798e52748b3ddc0ce36a6d999a2399bcd46f2e3c80efa48157"} Jan 28 12:44:30 crc kubenswrapper[4685]: I0128 12:44:30.927116 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1a3fc23ba58bda798e52748b3ddc0ce36a6d999a2399bcd46f2e3c80efa48157" Jan 28 12:44:30 crc kubenswrapper[4685]: I0128 12:44:30.927585 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd" Jan 28 12:44:39 crc kubenswrapper[4685]: I0128 12:44:39.867374 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7bbbd9668d-jpf96"] Jan 28 12:44:39 crc kubenswrapper[4685]: E0128 12:44:39.868346 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28604166-5316-49b0-acf2-c59881d7d29c" containerName="pull" Jan 28 12:44:39 crc kubenswrapper[4685]: I0128 12:44:39.868363 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="28604166-5316-49b0-acf2-c59881d7d29c" containerName="pull" Jan 28 12:44:39 crc kubenswrapper[4685]: E0128 12:44:39.868389 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28604166-5316-49b0-acf2-c59881d7d29c" containerName="extract" Jan 28 12:44:39 crc kubenswrapper[4685]: I0128 12:44:39.868396 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="28604166-5316-49b0-acf2-c59881d7d29c" containerName="extract" Jan 28 12:44:39 crc kubenswrapper[4685]: E0128 12:44:39.868410 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28604166-5316-49b0-acf2-c59881d7d29c" containerName="util" Jan 28 12:44:39 crc kubenswrapper[4685]: I0128 12:44:39.868418 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="28604166-5316-49b0-acf2-c59881d7d29c" containerName="util" Jan 28 12:44:39 crc kubenswrapper[4685]: I0128 12:44:39.868556 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="28604166-5316-49b0-acf2-c59881d7d29c" containerName="extract" Jan 28 12:44:39 crc kubenswrapper[4685]: I0128 12:44:39.869097 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7bbbd9668d-jpf96" Jan 28 12:44:39 crc kubenswrapper[4685]: I0128 12:44:39.874825 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-service-cert" Jan 28 12:44:39 crc kubenswrapper[4685]: I0128 12:44:39.874884 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-mhrzh" Jan 28 12:44:39 crc kubenswrapper[4685]: I0128 12:44:39.879883 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7bbbd9668d-jpf96"] Jan 28 12:44:39 crc kubenswrapper[4685]: I0128 12:44:39.959889 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgdx8\" (UniqueName: \"kubernetes.io/projected/7c286e16-1bf9-411c-9787-5a51a2ca1adc-kube-api-access-kgdx8\") pod \"keystone-operator-controller-manager-7bbbd9668d-jpf96\" (UID: \"7c286e16-1bf9-411c-9787-5a51a2ca1adc\") " pod="openstack-operators/keystone-operator-controller-manager-7bbbd9668d-jpf96" Jan 28 12:44:39 crc kubenswrapper[4685]: I0128 12:44:39.959955 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7c286e16-1bf9-411c-9787-5a51a2ca1adc-webhook-cert\") pod \"keystone-operator-controller-manager-7bbbd9668d-jpf96\" (UID: \"7c286e16-1bf9-411c-9787-5a51a2ca1adc\") " pod="openstack-operators/keystone-operator-controller-manager-7bbbd9668d-jpf96" Jan 28 12:44:39 crc kubenswrapper[4685]: I0128 12:44:39.959981 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7c286e16-1bf9-411c-9787-5a51a2ca1adc-apiservice-cert\") pod \"keystone-operator-controller-manager-7bbbd9668d-jpf96\" (UID: \"7c286e16-1bf9-411c-9787-5a51a2ca1adc\") " pod="openstack-operators/keystone-operator-controller-manager-7bbbd9668d-jpf96" Jan 28 12:44:40 crc kubenswrapper[4685]: I0128 12:44:40.060814 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgdx8\" (UniqueName: \"kubernetes.io/projected/7c286e16-1bf9-411c-9787-5a51a2ca1adc-kube-api-access-kgdx8\") pod \"keystone-operator-controller-manager-7bbbd9668d-jpf96\" (UID: \"7c286e16-1bf9-411c-9787-5a51a2ca1adc\") " pod="openstack-operators/keystone-operator-controller-manager-7bbbd9668d-jpf96" Jan 28 12:44:40 crc kubenswrapper[4685]: I0128 12:44:40.061105 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7c286e16-1bf9-411c-9787-5a51a2ca1adc-webhook-cert\") pod \"keystone-operator-controller-manager-7bbbd9668d-jpf96\" (UID: \"7c286e16-1bf9-411c-9787-5a51a2ca1adc\") " pod="openstack-operators/keystone-operator-controller-manager-7bbbd9668d-jpf96" Jan 28 12:44:40 crc kubenswrapper[4685]: I0128 12:44:40.061248 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7c286e16-1bf9-411c-9787-5a51a2ca1adc-apiservice-cert\") pod \"keystone-operator-controller-manager-7bbbd9668d-jpf96\" (UID: \"7c286e16-1bf9-411c-9787-5a51a2ca1adc\") " pod="openstack-operators/keystone-operator-controller-manager-7bbbd9668d-jpf96" Jan 28 12:44:40 crc kubenswrapper[4685]: I0128 12:44:40.066837 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7c286e16-1bf9-411c-9787-5a51a2ca1adc-apiservice-cert\") pod \"keystone-operator-controller-manager-7bbbd9668d-jpf96\" (UID: \"7c286e16-1bf9-411c-9787-5a51a2ca1adc\") " pod="openstack-operators/keystone-operator-controller-manager-7bbbd9668d-jpf96" Jan 28 12:44:40 crc kubenswrapper[4685]: I0128 12:44:40.067154 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7c286e16-1bf9-411c-9787-5a51a2ca1adc-webhook-cert\") pod \"keystone-operator-controller-manager-7bbbd9668d-jpf96\" (UID: \"7c286e16-1bf9-411c-9787-5a51a2ca1adc\") " pod="openstack-operators/keystone-operator-controller-manager-7bbbd9668d-jpf96" Jan 28 12:44:40 crc kubenswrapper[4685]: I0128 12:44:40.078795 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgdx8\" (UniqueName: \"kubernetes.io/projected/7c286e16-1bf9-411c-9787-5a51a2ca1adc-kube-api-access-kgdx8\") pod \"keystone-operator-controller-manager-7bbbd9668d-jpf96\" (UID: \"7c286e16-1bf9-411c-9787-5a51a2ca1adc\") " pod="openstack-operators/keystone-operator-controller-manager-7bbbd9668d-jpf96" Jan 28 12:44:40 crc kubenswrapper[4685]: I0128 12:44:40.184501 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7bbbd9668d-jpf96" Jan 28 12:44:40 crc kubenswrapper[4685]: I0128 12:44:40.588484 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7bbbd9668d-jpf96"] Jan 28 12:44:40 crc kubenswrapper[4685]: I0128 12:44:40.997762 4685 generic.go:334] "Generic (PLEG): container finished" podID="b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb" containerID="7080014cac2b634408350e36c628719518c5cd21147cb32713c4e2dff3d3d110" exitCode=0 Jan 28 12:44:40 crc kubenswrapper[4685]: I0128 12:44:40.998012 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/rabbitmq-server-0" event={"ID":"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb","Type":"ContainerDied","Data":"7080014cac2b634408350e36c628719518c5cd21147cb32713c4e2dff3d3d110"} Jan 28 12:44:41 crc kubenswrapper[4685]: I0128 12:44:41.001120 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7bbbd9668d-jpf96" event={"ID":"7c286e16-1bf9-411c-9787-5a51a2ca1adc","Type":"ContainerStarted","Data":"8bcf63e792cdc078a6f290b8c0e81cb9b1a8305be68317668faa3d4e6c4c474b"} Jan 28 12:44:45 crc kubenswrapper[4685]: I0128 12:44:45.038970 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/rabbitmq-server-0" event={"ID":"b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb","Type":"ContainerStarted","Data":"f608909a5967db545e62063b341c918c4bd30edf82c508e38e91f1e8ee5ae145"} Jan 28 12:44:46 crc kubenswrapper[4685]: I0128 12:44:46.049695 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:44:50 crc kubenswrapper[4685]: I0128 12:44:50.575662 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/rabbitmq-server-0" podStartSLOduration=47.22935527 podStartE2EDuration="59.57563977s" podCreationTimestamp="2026-01-28 12:43:51 +0000 UTC" firstStartedPulling="2026-01-28 12:43:53.612259223 +0000 UTC m=+1384.699673058" lastFinishedPulling="2026-01-28 12:44:05.958543723 +0000 UTC m=+1397.045957558" observedRunningTime="2026-01-28 12:44:46.089084118 +0000 UTC m=+1437.176497963" watchObservedRunningTime="2026-01-28 12:44:50.57563977 +0000 UTC m=+1441.663053635" Jan 28 12:44:57 crc kubenswrapper[4685]: I0128 12:44:57.113456 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7bbbd9668d-jpf96" event={"ID":"7c286e16-1bf9-411c-9787-5a51a2ca1adc","Type":"ContainerStarted","Data":"cf64a726207ed5a412e83e1deebc89a3330e14e97b02cbd9f13f419c83201fa1"} Jan 28 12:44:58 crc kubenswrapper[4685]: I0128 12:44:58.120752 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7bbbd9668d-jpf96" Jan 28 12:44:58 crc kubenswrapper[4685]: I0128 12:44:58.150588 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7bbbd9668d-jpf96" podStartSLOduration=3.057564155 podStartE2EDuration="19.150560415s" podCreationTimestamp="2026-01-28 12:44:39 +0000 UTC" firstStartedPulling="2026-01-28 12:44:40.628254967 +0000 UTC m=+1431.715668812" lastFinishedPulling="2026-01-28 12:44:56.721251207 +0000 UTC m=+1447.808665072" observedRunningTime="2026-01-28 12:44:58.141994377 +0000 UTC m=+1449.229408212" watchObservedRunningTime="2026-01-28 12:44:58.150560415 +0000 UTC m=+1449.237974260" 
Jan 28 12:45:00 crc kubenswrapper[4685]: I0128 12:45:00.144573 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493405-hbrvh"] Jan 28 12:45:00 crc kubenswrapper[4685]: I0128 12:45:00.145872 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-hbrvh" Jan 28 12:45:00 crc kubenswrapper[4685]: I0128 12:45:00.149577 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 12:45:00 crc kubenswrapper[4685]: I0128 12:45:00.150066 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 12:45:00 crc kubenswrapper[4685]: I0128 12:45:00.155683 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493405-hbrvh"] Jan 28 12:45:00 crc kubenswrapper[4685]: I0128 12:45:00.310401 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0fbc9108-75c8-402a-8604-362b0ac43eb3-secret-volume\") pod \"collect-profiles-29493405-hbrvh\" (UID: \"0fbc9108-75c8-402a-8604-362b0ac43eb3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-hbrvh" Jan 28 12:45:00 crc kubenswrapper[4685]: I0128 12:45:00.310632 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0fbc9108-75c8-402a-8604-362b0ac43eb3-config-volume\") pod \"collect-profiles-29493405-hbrvh\" (UID: \"0fbc9108-75c8-402a-8604-362b0ac43eb3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-hbrvh" Jan 28 12:45:00 crc kubenswrapper[4685]: I0128 12:45:00.310717 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s46xf\" (UniqueName: \"kubernetes.io/projected/0fbc9108-75c8-402a-8604-362b0ac43eb3-kube-api-access-s46xf\") pod \"collect-profiles-29493405-hbrvh\" (UID: \"0fbc9108-75c8-402a-8604-362b0ac43eb3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-hbrvh" Jan 28 12:45:00 crc kubenswrapper[4685]: I0128 12:45:00.412236 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0fbc9108-75c8-402a-8604-362b0ac43eb3-secret-volume\") pod \"collect-profiles-29493405-hbrvh\" (UID: \"0fbc9108-75c8-402a-8604-362b0ac43eb3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-hbrvh" Jan 28 12:45:00 crc kubenswrapper[4685]: I0128 12:45:00.412378 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0fbc9108-75c8-402a-8604-362b0ac43eb3-config-volume\") pod \"collect-profiles-29493405-hbrvh\" (UID: \"0fbc9108-75c8-402a-8604-362b0ac43eb3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-hbrvh" Jan 28 12:45:00 crc kubenswrapper[4685]: I0128 12:45:00.412448 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s46xf\" (UniqueName: \"kubernetes.io/projected/0fbc9108-75c8-402a-8604-362b0ac43eb3-kube-api-access-s46xf\") pod \"collect-profiles-29493405-hbrvh\" (UID: \"0fbc9108-75c8-402a-8604-362b0ac43eb3\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-hbrvh" Jan 28 12:45:00 crc kubenswrapper[4685]: I0128 12:45:00.416217 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0fbc9108-75c8-402a-8604-362b0ac43eb3-config-volume\") pod \"collect-profiles-29493405-hbrvh\" (UID: \"0fbc9108-75c8-402a-8604-362b0ac43eb3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-hbrvh" Jan 28 12:45:00 crc kubenswrapper[4685]: I0128 12:45:00.421694 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0fbc9108-75c8-402a-8604-362b0ac43eb3-secret-volume\") pod \"collect-profiles-29493405-hbrvh\" (UID: \"0fbc9108-75c8-402a-8604-362b0ac43eb3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-hbrvh" Jan 28 12:45:00 crc kubenswrapper[4685]: I0128 12:45:00.448786 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s46xf\" (UniqueName: \"kubernetes.io/projected/0fbc9108-75c8-402a-8604-362b0ac43eb3-kube-api-access-s46xf\") pod \"collect-profiles-29493405-hbrvh\" (UID: \"0fbc9108-75c8-402a-8604-362b0ac43eb3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-hbrvh" Jan 28 12:45:00 crc kubenswrapper[4685]: I0128 12:45:00.475239 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-hbrvh" Jan 28 12:45:00 crc kubenswrapper[4685]: I0128 12:45:00.743500 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493405-hbrvh"] Jan 28 12:45:00 crc kubenswrapper[4685]: W0128 12:45:00.748430 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0fbc9108_75c8_402a_8604_362b0ac43eb3.slice/crio-47f802d8ebd2fea195c851a970af903766b17300fea33159349912211017ebdd WatchSource:0}: Error finding container 47f802d8ebd2fea195c851a970af903766b17300fea33159349912211017ebdd: Status 404 returned error can't find the container with id 47f802d8ebd2fea195c851a970af903766b17300fea33159349912211017ebdd Jan 28 12:45:01 crc kubenswrapper[4685]: I0128 12:45:01.149967 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-hbrvh" event={"ID":"0fbc9108-75c8-402a-8604-362b0ac43eb3","Type":"ContainerStarted","Data":"47f802d8ebd2fea195c851a970af903766b17300fea33159349912211017ebdd"} Jan 28 12:45:02 crc kubenswrapper[4685]: I0128 12:45:02.159668 4685 generic.go:334] "Generic (PLEG): container finished" podID="0fbc9108-75c8-402a-8604-362b0ac43eb3" containerID="323d78e833363a5f5ea1d185c3646053c0db175474110b157d1546ff0e284d7d" exitCode=0 Jan 28 12:45:02 crc kubenswrapper[4685]: I0128 12:45:02.159717 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-hbrvh" event={"ID":"0fbc9108-75c8-402a-8604-362b0ac43eb3","Type":"ContainerDied","Data":"323d78e833363a5f5ea1d185c3646053c0db175474110b157d1546ff0e284d7d"} Jan 28 12:45:03 crc kubenswrapper[4685]: I0128 12:45:03.192955 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/rabbitmq-server-0" Jan 28 12:45:03 crc kubenswrapper[4685]: I0128 12:45:03.474723 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-hbrvh" Jan 28 12:45:03 crc kubenswrapper[4685]: I0128 12:45:03.555044 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0fbc9108-75c8-402a-8604-362b0ac43eb3-config-volume\") pod \"0fbc9108-75c8-402a-8604-362b0ac43eb3\" (UID: \"0fbc9108-75c8-402a-8604-362b0ac43eb3\") " Jan 28 12:45:03 crc kubenswrapper[4685]: I0128 12:45:03.555134 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0fbc9108-75c8-402a-8604-362b0ac43eb3-secret-volume\") pod \"0fbc9108-75c8-402a-8604-362b0ac43eb3\" (UID: \"0fbc9108-75c8-402a-8604-362b0ac43eb3\") " Jan 28 12:45:03 crc kubenswrapper[4685]: I0128 12:45:03.555258 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s46xf\" (UniqueName: \"kubernetes.io/projected/0fbc9108-75c8-402a-8604-362b0ac43eb3-kube-api-access-s46xf\") pod \"0fbc9108-75c8-402a-8604-362b0ac43eb3\" (UID: \"0fbc9108-75c8-402a-8604-362b0ac43eb3\") " Jan 28 12:45:03 crc kubenswrapper[4685]: I0128 12:45:03.555825 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0fbc9108-75c8-402a-8604-362b0ac43eb3-config-volume" (OuterVolumeSpecName: "config-volume") pod "0fbc9108-75c8-402a-8604-362b0ac43eb3" (UID: "0fbc9108-75c8-402a-8604-362b0ac43eb3"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:45:03 crc kubenswrapper[4685]: I0128 12:45:03.562996 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0fbc9108-75c8-402a-8604-362b0ac43eb3-kube-api-access-s46xf" (OuterVolumeSpecName: "kube-api-access-s46xf") pod "0fbc9108-75c8-402a-8604-362b0ac43eb3" (UID: "0fbc9108-75c8-402a-8604-362b0ac43eb3"). InnerVolumeSpecName "kube-api-access-s46xf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:45:03 crc kubenswrapper[4685]: I0128 12:45:03.569417 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fbc9108-75c8-402a-8604-362b0ac43eb3-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0fbc9108-75c8-402a-8604-362b0ac43eb3" (UID: "0fbc9108-75c8-402a-8604-362b0ac43eb3"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:45:03 crc kubenswrapper[4685]: I0128 12:45:03.656749 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s46xf\" (UniqueName: \"kubernetes.io/projected/0fbc9108-75c8-402a-8604-362b0ac43eb3-kube-api-access-s46xf\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:03 crc kubenswrapper[4685]: I0128 12:45:03.656795 4685 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0fbc9108-75c8-402a-8604-362b0ac43eb3-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:03 crc kubenswrapper[4685]: I0128 12:45:03.656809 4685 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0fbc9108-75c8-402a-8604-362b0ac43eb3-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:04 crc kubenswrapper[4685]: I0128 12:45:04.174946 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-hbrvh" event={"ID":"0fbc9108-75c8-402a-8604-362b0ac43eb3","Type":"ContainerDied","Data":"47f802d8ebd2fea195c851a970af903766b17300fea33159349912211017ebdd"} Jan 28 12:45:04 crc kubenswrapper[4685]: I0128 12:45:04.174982 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="47f802d8ebd2fea195c851a970af903766b17300fea33159349912211017ebdd" Jan 28 12:45:04 crc kubenswrapper[4685]: I0128 12:45:04.174990 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493405-hbrvh" Jan 28 12:45:10 crc kubenswrapper[4685]: I0128 12:45:10.187969 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7bbbd9668d-jpf96" Jan 28 12:45:12 crc kubenswrapper[4685]: I0128 12:45:12.842912 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/keystone-db-create-6nbgl"] Jan 28 12:45:12 crc kubenswrapper[4685]: E0128 12:45:12.843419 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fbc9108-75c8-402a-8604-362b0ac43eb3" containerName="collect-profiles" Jan 28 12:45:12 crc kubenswrapper[4685]: I0128 12:45:12.843433 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fbc9108-75c8-402a-8604-362b0ac43eb3" containerName="collect-profiles" Jan 28 12:45:12 crc kubenswrapper[4685]: I0128 12:45:12.843537 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fbc9108-75c8-402a-8604-362b0ac43eb3" containerName="collect-profiles" Jan 28 12:45:12 crc kubenswrapper[4685]: I0128 12:45:12.843941 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-db-create-6nbgl" Jan 28 12:45:12 crc kubenswrapper[4685]: I0128 12:45:12.857051 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/keystone-9121-account-create-update-xl2qc"] Jan 28 12:45:12 crc kubenswrapper[4685]: I0128 12:45:12.858016 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/keystone-9121-account-create-update-xl2qc" Jan 28 12:45:12 crc kubenswrapper[4685]: I0128 12:45:12.885890 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone-db-secret" Jan 28 12:45:12 crc kubenswrapper[4685]: I0128 12:45:12.902233 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/keystone-db-create-6nbgl"] Jan 28 12:45:12 crc kubenswrapper[4685]: I0128 12:45:12.925367 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/keystone-9121-account-create-update-xl2qc"] Jan 28 12:45:12 crc kubenswrapper[4685]: I0128 12:45:12.997369 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9126ced2-36b8-48c1-97e2-22a4d5b69ec9-operator-scripts\") pod \"keystone-db-create-6nbgl\" (UID: \"9126ced2-36b8-48c1-97e2-22a4d5b69ec9\") " pod="glance-kuttl-tests/keystone-db-create-6nbgl" Jan 28 12:45:12 crc kubenswrapper[4685]: I0128 12:45:12.997437 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sr4hv\" (UniqueName: \"kubernetes.io/projected/5aa289a8-1ea3-4559-8d70-cab87d8fd9d9-kube-api-access-sr4hv\") pod \"keystone-9121-account-create-update-xl2qc\" (UID: \"5aa289a8-1ea3-4559-8d70-cab87d8fd9d9\") " pod="glance-kuttl-tests/keystone-9121-account-create-update-xl2qc" Jan 28 12:45:12 crc kubenswrapper[4685]: I0128 12:45:12.997547 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5aa289a8-1ea3-4559-8d70-cab87d8fd9d9-operator-scripts\") pod \"keystone-9121-account-create-update-xl2qc\" (UID: \"5aa289a8-1ea3-4559-8d70-cab87d8fd9d9\") " pod="glance-kuttl-tests/keystone-9121-account-create-update-xl2qc" Jan 28 12:45:12 crc kubenswrapper[4685]: I0128 12:45:12.997577 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlgpz\" (UniqueName: \"kubernetes.io/projected/9126ced2-36b8-48c1-97e2-22a4d5b69ec9-kube-api-access-jlgpz\") pod \"keystone-db-create-6nbgl\" (UID: \"9126ced2-36b8-48c1-97e2-22a4d5b69ec9\") " pod="glance-kuttl-tests/keystone-db-create-6nbgl" Jan 28 12:45:13 crc kubenswrapper[4685]: I0128 12:45:13.099830 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5aa289a8-1ea3-4559-8d70-cab87d8fd9d9-operator-scripts\") pod \"keystone-9121-account-create-update-xl2qc\" (UID: \"5aa289a8-1ea3-4559-8d70-cab87d8fd9d9\") " pod="glance-kuttl-tests/keystone-9121-account-create-update-xl2qc" Jan 28 12:45:13 crc kubenswrapper[4685]: I0128 12:45:13.099883 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlgpz\" (UniqueName: \"kubernetes.io/projected/9126ced2-36b8-48c1-97e2-22a4d5b69ec9-kube-api-access-jlgpz\") pod \"keystone-db-create-6nbgl\" (UID: \"9126ced2-36b8-48c1-97e2-22a4d5b69ec9\") " pod="glance-kuttl-tests/keystone-db-create-6nbgl" Jan 28 12:45:13 crc kubenswrapper[4685]: I0128 12:45:13.099940 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9126ced2-36b8-48c1-97e2-22a4d5b69ec9-operator-scripts\") pod \"keystone-db-create-6nbgl\" (UID: \"9126ced2-36b8-48c1-97e2-22a4d5b69ec9\") " 
pod="glance-kuttl-tests/keystone-db-create-6nbgl" Jan 28 12:45:13 crc kubenswrapper[4685]: I0128 12:45:13.099962 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sr4hv\" (UniqueName: \"kubernetes.io/projected/5aa289a8-1ea3-4559-8d70-cab87d8fd9d9-kube-api-access-sr4hv\") pod \"keystone-9121-account-create-update-xl2qc\" (UID: \"5aa289a8-1ea3-4559-8d70-cab87d8fd9d9\") " pod="glance-kuttl-tests/keystone-9121-account-create-update-xl2qc" Jan 28 12:45:13 crc kubenswrapper[4685]: I0128 12:45:13.101521 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5aa289a8-1ea3-4559-8d70-cab87d8fd9d9-operator-scripts\") pod \"keystone-9121-account-create-update-xl2qc\" (UID: \"5aa289a8-1ea3-4559-8d70-cab87d8fd9d9\") " pod="glance-kuttl-tests/keystone-9121-account-create-update-xl2qc" Jan 28 12:45:13 crc kubenswrapper[4685]: I0128 12:45:13.102532 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9126ced2-36b8-48c1-97e2-22a4d5b69ec9-operator-scripts\") pod \"keystone-db-create-6nbgl\" (UID: \"9126ced2-36b8-48c1-97e2-22a4d5b69ec9\") " pod="glance-kuttl-tests/keystone-db-create-6nbgl" Jan 28 12:45:13 crc kubenswrapper[4685]: I0128 12:45:13.122756 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sr4hv\" (UniqueName: \"kubernetes.io/projected/5aa289a8-1ea3-4559-8d70-cab87d8fd9d9-kube-api-access-sr4hv\") pod \"keystone-9121-account-create-update-xl2qc\" (UID: \"5aa289a8-1ea3-4559-8d70-cab87d8fd9d9\") " pod="glance-kuttl-tests/keystone-9121-account-create-update-xl2qc" Jan 28 12:45:13 crc kubenswrapper[4685]: I0128 12:45:13.135593 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlgpz\" (UniqueName: \"kubernetes.io/projected/9126ced2-36b8-48c1-97e2-22a4d5b69ec9-kube-api-access-jlgpz\") pod \"keystone-db-create-6nbgl\" (UID: \"9126ced2-36b8-48c1-97e2-22a4d5b69ec9\") " pod="glance-kuttl-tests/keystone-db-create-6nbgl" Jan 28 12:45:13 crc kubenswrapper[4685]: I0128 12:45:13.226745 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-db-create-6nbgl" Jan 28 12:45:13 crc kubenswrapper[4685]: I0128 12:45:13.235269 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/keystone-9121-account-create-update-xl2qc" Jan 28 12:45:13 crc kubenswrapper[4685]: I0128 12:45:13.533893 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/keystone-9121-account-create-update-xl2qc"] Jan 28 12:45:13 crc kubenswrapper[4685]: I0128 12:45:13.688902 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/keystone-db-create-6nbgl"] Jan 28 12:45:13 crc kubenswrapper[4685]: W0128 12:45:13.690611 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9126ced2_36b8_48c1_97e2_22a4d5b69ec9.slice/crio-ecd858747ff19ee11895d900bd5e205aa29e30c0265d4b1566c36820449f40de WatchSource:0}: Error finding container ecd858747ff19ee11895d900bd5e205aa29e30c0265d4b1566c36820449f40de: Status 404 returned error can't find the container with id ecd858747ff19ee11895d900bd5e205aa29e30c0265d4b1566c36820449f40de Jan 28 12:45:13 crc kubenswrapper[4685]: I0128 12:45:13.786029 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-index-srqv2"] Jan 28 12:45:13 crc kubenswrapper[4685]: I0128 12:45:13.786880 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-index-srqv2" Jan 28 12:45:13 crc kubenswrapper[4685]: I0128 12:45:13.789124 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-index-dockercfg-vq77g" Jan 28 12:45:13 crc kubenswrapper[4685]: I0128 12:45:13.796834 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-index-srqv2"] Jan 28 12:45:13 crc kubenswrapper[4685]: I0128 12:45:13.909749 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fc2qv\" (UniqueName: \"kubernetes.io/projected/1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8-kube-api-access-fc2qv\") pod \"horizon-operator-index-srqv2\" (UID: \"1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8\") " pod="openstack-operators/horizon-operator-index-srqv2" Jan 28 12:45:14 crc kubenswrapper[4685]: I0128 12:45:14.011492 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fc2qv\" (UniqueName: \"kubernetes.io/projected/1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8-kube-api-access-fc2qv\") pod \"horizon-operator-index-srqv2\" (UID: \"1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8\") " pod="openstack-operators/horizon-operator-index-srqv2" Jan 28 12:45:14 crc kubenswrapper[4685]: I0128 12:45:14.041175 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fc2qv\" (UniqueName: \"kubernetes.io/projected/1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8-kube-api-access-fc2qv\") pod \"horizon-operator-index-srqv2\" (UID: \"1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8\") " pod="openstack-operators/horizon-operator-index-srqv2" Jan 28 12:45:14 crc kubenswrapper[4685]: I0128 12:45:14.109211 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-index-srqv2" Jan 28 12:45:14 crc kubenswrapper[4685]: I0128 12:45:14.240537 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-db-create-6nbgl" event={"ID":"9126ced2-36b8-48c1-97e2-22a4d5b69ec9","Type":"ContainerStarted","Data":"ecd858747ff19ee11895d900bd5e205aa29e30c0265d4b1566c36820449f40de"} Jan 28 12:45:14 crc kubenswrapper[4685]: I0128 12:45:14.244830 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-9121-account-create-update-xl2qc" event={"ID":"5aa289a8-1ea3-4559-8d70-cab87d8fd9d9","Type":"ContainerStarted","Data":"b71fd010e1c827e0d3432c6fbbcbbd4e516305c3f31b5ec897d43ac7a693c844"} Jan 28 12:45:14 crc kubenswrapper[4685]: I0128 12:45:14.327327 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-index-srqv2"] Jan 28 12:45:15 crc kubenswrapper[4685]: I0128 12:45:15.252843 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-9121-account-create-update-xl2qc" event={"ID":"5aa289a8-1ea3-4559-8d70-cab87d8fd9d9","Type":"ContainerStarted","Data":"1da72d9f4c747d5cce89036a384f1636bf58722ca89158cb889e49ad148d5e39"} Jan 28 12:45:15 crc kubenswrapper[4685]: I0128 12:45:15.254357 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-index-srqv2" event={"ID":"1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8","Type":"ContainerStarted","Data":"187bb09cf036ecb465c6f57eda87be7695bf014c7b03f582fb21915d511060b7"} Jan 28 12:45:15 crc kubenswrapper[4685]: I0128 12:45:15.255382 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-db-create-6nbgl" event={"ID":"9126ced2-36b8-48c1-97e2-22a4d5b69ec9","Type":"ContainerStarted","Data":"b7bec0b6ca776dc6d4f57e6880bfa3cd301c90bb12b1346a3e404634644bb00f"} Jan 28 12:45:15 crc kubenswrapper[4685]: I0128 12:45:15.277359 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/keystone-9121-account-create-update-xl2qc" podStartSLOduration=3.277332007 podStartE2EDuration="3.277332007s" podCreationTimestamp="2026-01-28 12:45:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:45:15.2677576 +0000 UTC m=+1466.355171475" watchObservedRunningTime="2026-01-28 12:45:15.277332007 +0000 UTC m=+1466.364745862" Jan 28 12:45:15 crc kubenswrapper[4685]: I0128 12:45:15.287337 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/keystone-db-create-6nbgl" podStartSLOduration=3.287320465 podStartE2EDuration="3.287320465s" podCreationTimestamp="2026-01-28 12:45:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:45:15.28570451 +0000 UTC m=+1466.373118345" watchObservedRunningTime="2026-01-28 12:45:15.287320465 +0000 UTC m=+1466.374734300" Jan 28 12:45:17 crc kubenswrapper[4685]: I0128 12:45:17.268890 4685 generic.go:334] "Generic (PLEG): container finished" podID="9126ced2-36b8-48c1-97e2-22a4d5b69ec9" containerID="b7bec0b6ca776dc6d4f57e6880bfa3cd301c90bb12b1346a3e404634644bb00f" exitCode=0 Jan 28 12:45:17 crc kubenswrapper[4685]: I0128 12:45:17.268975 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-db-create-6nbgl" 
event={"ID":"9126ced2-36b8-48c1-97e2-22a4d5b69ec9","Type":"ContainerDied","Data":"b7bec0b6ca776dc6d4f57e6880bfa3cd301c90bb12b1346a3e404634644bb00f"} Jan 28 12:45:18 crc kubenswrapper[4685]: I0128 12:45:18.775075 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/horizon-operator-index-srqv2"] Jan 28 12:45:19 crc kubenswrapper[4685]: I0128 12:45:19.184834 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-index-97nxq"] Jan 28 12:45:19 crc kubenswrapper[4685]: I0128 12:45:19.185808 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-index-97nxq" Jan 28 12:45:19 crc kubenswrapper[4685]: I0128 12:45:19.189024 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-index-dockercfg-wspjk" Jan 28 12:45:19 crc kubenswrapper[4685]: I0128 12:45:19.192770 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-index-97nxq"] Jan 28 12:45:19 crc kubenswrapper[4685]: I0128 12:45:19.287300 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7c9j\" (UniqueName: \"kubernetes.io/projected/fbf9a561-e551-4c1f-9ac1-ac930e8fc5ec-kube-api-access-k7c9j\") pod \"swift-operator-index-97nxq\" (UID: \"fbf9a561-e551-4c1f-9ac1-ac930e8fc5ec\") " pod="openstack-operators/swift-operator-index-97nxq" Jan 28 12:45:19 crc kubenswrapper[4685]: I0128 12:45:19.388995 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7c9j\" (UniqueName: \"kubernetes.io/projected/fbf9a561-e551-4c1f-9ac1-ac930e8fc5ec-kube-api-access-k7c9j\") pod \"swift-operator-index-97nxq\" (UID: \"fbf9a561-e551-4c1f-9ac1-ac930e8fc5ec\") " pod="openstack-operators/swift-operator-index-97nxq" Jan 28 12:45:19 crc kubenswrapper[4685]: I0128 12:45:19.407045 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7c9j\" (UniqueName: \"kubernetes.io/projected/fbf9a561-e551-4c1f-9ac1-ac930e8fc5ec-kube-api-access-k7c9j\") pod \"swift-operator-index-97nxq\" (UID: \"fbf9a561-e551-4c1f-9ac1-ac930e8fc5ec\") " pod="openstack-operators/swift-operator-index-97nxq" Jan 28 12:45:19 crc kubenswrapper[4685]: I0128 12:45:19.515322 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-index-97nxq" Jan 28 12:45:19 crc kubenswrapper[4685]: I0128 12:45:19.589469 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-index-qjv59"] Jan 28 12:45:19 crc kubenswrapper[4685]: I0128 12:45:19.590749 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-index-qjv59" Jan 28 12:45:19 crc kubenswrapper[4685]: I0128 12:45:19.598331 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-index-qjv59"] Jan 28 12:45:19 crc kubenswrapper[4685]: I0128 12:45:19.693767 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6z4h\" (UniqueName: \"kubernetes.io/projected/46e31e61-86e7-4c28-a833-e524a0220612-kube-api-access-l6z4h\") pod \"horizon-operator-index-qjv59\" (UID: \"46e31e61-86e7-4c28-a833-e524a0220612\") " pod="openstack-operators/horizon-operator-index-qjv59" Jan 28 12:45:19 crc kubenswrapper[4685]: I0128 12:45:19.795132 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6z4h\" (UniqueName: \"kubernetes.io/projected/46e31e61-86e7-4c28-a833-e524a0220612-kube-api-access-l6z4h\") pod \"horizon-operator-index-qjv59\" (UID: \"46e31e61-86e7-4c28-a833-e524a0220612\") " pod="openstack-operators/horizon-operator-index-qjv59" Jan 28 12:45:19 crc kubenswrapper[4685]: I0128 12:45:19.813021 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6z4h\" (UniqueName: \"kubernetes.io/projected/46e31e61-86e7-4c28-a833-e524a0220612-kube-api-access-l6z4h\") pod \"horizon-operator-index-qjv59\" (UID: \"46e31e61-86e7-4c28-a833-e524a0220612\") " pod="openstack-operators/horizon-operator-index-qjv59" Jan 28 12:45:19 crc kubenswrapper[4685]: I0128 12:45:19.911795 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-index-qjv59" Jan 28 12:45:20 crc kubenswrapper[4685]: I0128 12:45:20.907922 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-db-create-6nbgl" Jan 28 12:45:21 crc kubenswrapper[4685]: I0128 12:45:21.012010 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9126ced2-36b8-48c1-97e2-22a4d5b69ec9-operator-scripts\") pod \"9126ced2-36b8-48c1-97e2-22a4d5b69ec9\" (UID: \"9126ced2-36b8-48c1-97e2-22a4d5b69ec9\") " Jan 28 12:45:21 crc kubenswrapper[4685]: I0128 12:45:21.012114 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jlgpz\" (UniqueName: \"kubernetes.io/projected/9126ced2-36b8-48c1-97e2-22a4d5b69ec9-kube-api-access-jlgpz\") pod \"9126ced2-36b8-48c1-97e2-22a4d5b69ec9\" (UID: \"9126ced2-36b8-48c1-97e2-22a4d5b69ec9\") " Jan 28 12:45:21 crc kubenswrapper[4685]: I0128 12:45:21.012950 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9126ced2-36b8-48c1-97e2-22a4d5b69ec9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9126ced2-36b8-48c1-97e2-22a4d5b69ec9" (UID: "9126ced2-36b8-48c1-97e2-22a4d5b69ec9"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:45:21 crc kubenswrapper[4685]: I0128 12:45:21.013323 4685 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9126ced2-36b8-48c1-97e2-22a4d5b69ec9-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:21 crc kubenswrapper[4685]: I0128 12:45:21.017588 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9126ced2-36b8-48c1-97e2-22a4d5b69ec9-kube-api-access-jlgpz" (OuterVolumeSpecName: "kube-api-access-jlgpz") pod "9126ced2-36b8-48c1-97e2-22a4d5b69ec9" (UID: "9126ced2-36b8-48c1-97e2-22a4d5b69ec9"). InnerVolumeSpecName "kube-api-access-jlgpz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:45:21 crc kubenswrapper[4685]: I0128 12:45:21.114867 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jlgpz\" (UniqueName: \"kubernetes.io/projected/9126ced2-36b8-48c1-97e2-22a4d5b69ec9-kube-api-access-jlgpz\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:21 crc kubenswrapper[4685]: I0128 12:45:21.299386 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-db-create-6nbgl" event={"ID":"9126ced2-36b8-48c1-97e2-22a4d5b69ec9","Type":"ContainerDied","Data":"ecd858747ff19ee11895d900bd5e205aa29e30c0265d4b1566c36820449f40de"} Jan 28 12:45:21 crc kubenswrapper[4685]: I0128 12:45:21.299444 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ecd858747ff19ee11895d900bd5e205aa29e30c0265d4b1566c36820449f40de" Jan 28 12:45:21 crc kubenswrapper[4685]: I0128 12:45:21.299535 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-db-create-6nbgl" Jan 28 12:45:25 crc kubenswrapper[4685]: I0128 12:45:25.349142 4685 generic.go:334] "Generic (PLEG): container finished" podID="5aa289a8-1ea3-4559-8d70-cab87d8fd9d9" containerID="1da72d9f4c747d5cce89036a384f1636bf58722ca89158cb889e49ad148d5e39" exitCode=0 Jan 28 12:45:25 crc kubenswrapper[4685]: I0128 12:45:25.349209 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-9121-account-create-update-xl2qc" event={"ID":"5aa289a8-1ea3-4559-8d70-cab87d8fd9d9","Type":"ContainerDied","Data":"1da72d9f4c747d5cce89036a384f1636bf58722ca89158cb889e49ad148d5e39"} Jan 28 12:45:25 crc kubenswrapper[4685]: I0128 12:45:25.446296 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-index-qjv59"] Jan 28 12:45:25 crc kubenswrapper[4685]: W0128 12:45:25.452345 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod46e31e61_86e7_4c28_a833_e524a0220612.slice/crio-7e41226591247e21d9a6d9c1b845daf0d76d276f9cba1e04922877b0dd756768 WatchSource:0}: Error finding container 7e41226591247e21d9a6d9c1b845daf0d76d276f9cba1e04922877b0dd756768: Status 404 returned error can't find the container with id 7e41226591247e21d9a6d9c1b845daf0d76d276f9cba1e04922877b0dd756768 Jan 28 12:45:25 crc kubenswrapper[4685]: I0128 12:45:25.496955 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-index-97nxq"] Jan 28 12:45:26 crc kubenswrapper[4685]: I0128 12:45:26.357086 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-index-97nxq" 
event={"ID":"fbf9a561-e551-4c1f-9ac1-ac930e8fc5ec","Type":"ContainerStarted","Data":"66316b18d7a551c79e04a7f79929658e2f5396cc34d5d444dc999a6e21c2f38c"} Jan 28 12:45:26 crc kubenswrapper[4685]: I0128 12:45:26.358646 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-index-qjv59" event={"ID":"46e31e61-86e7-4c28-a833-e524a0220612","Type":"ContainerStarted","Data":"f8022a7765dffd7e5936db0c4a5cdc8513933bc79383d2f0255a17bd30d7ba27"} Jan 28 12:45:26 crc kubenswrapper[4685]: I0128 12:45:26.358693 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-index-qjv59" event={"ID":"46e31e61-86e7-4c28-a833-e524a0220612","Type":"ContainerStarted","Data":"7e41226591247e21d9a6d9c1b845daf0d76d276f9cba1e04922877b0dd756768"} Jan 28 12:45:26 crc kubenswrapper[4685]: I0128 12:45:26.360555 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-index-srqv2" event={"ID":"1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8","Type":"ContainerStarted","Data":"c4b2c1830000e7470ac3781f6da8d877a4aee4629aee724c3f0736445baa6076"} Jan 28 12:45:26 crc kubenswrapper[4685]: I0128 12:45:26.360603 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/horizon-operator-index-srqv2" podUID="1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8" containerName="registry-server" containerID="cri-o://c4b2c1830000e7470ac3781f6da8d877a4aee4629aee724c3f0736445baa6076" gracePeriod=2 Jan 28 12:45:26 crc kubenswrapper[4685]: I0128 12:45:26.377777 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-index-qjv59" podStartSLOduration=6.878965154 podStartE2EDuration="7.377752282s" podCreationTimestamp="2026-01-28 12:45:19 +0000 UTC" firstStartedPulling="2026-01-28 12:45:25.454450093 +0000 UTC m=+1476.541863928" lastFinishedPulling="2026-01-28 12:45:25.953237221 +0000 UTC m=+1477.040651056" observedRunningTime="2026-01-28 12:45:26.371665913 +0000 UTC m=+1477.459079748" watchObservedRunningTime="2026-01-28 12:45:26.377752282 +0000 UTC m=+1477.465166117" Jan 28 12:45:26 crc kubenswrapper[4685]: I0128 12:45:26.394009 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-index-srqv2" podStartSLOduration=1.871242729 podStartE2EDuration="13.393989614s" podCreationTimestamp="2026-01-28 12:45:13 +0000 UTC" firstStartedPulling="2026-01-28 12:45:14.325963617 +0000 UTC m=+1465.413377452" lastFinishedPulling="2026-01-28 12:45:25.848710482 +0000 UTC m=+1476.936124337" observedRunningTime="2026-01-28 12:45:26.391928447 +0000 UTC m=+1477.479342282" watchObservedRunningTime="2026-01-28 12:45:26.393989614 +0000 UTC m=+1477.481403449" Jan 28 12:45:26 crc kubenswrapper[4685]: I0128 12:45:26.671510 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/keystone-9121-account-create-update-xl2qc" Jan 28 12:45:26 crc kubenswrapper[4685]: I0128 12:45:26.797072 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sr4hv\" (UniqueName: \"kubernetes.io/projected/5aa289a8-1ea3-4559-8d70-cab87d8fd9d9-kube-api-access-sr4hv\") pod \"5aa289a8-1ea3-4559-8d70-cab87d8fd9d9\" (UID: \"5aa289a8-1ea3-4559-8d70-cab87d8fd9d9\") " Jan 28 12:45:26 crc kubenswrapper[4685]: I0128 12:45:26.797165 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5aa289a8-1ea3-4559-8d70-cab87d8fd9d9-operator-scripts\") pod \"5aa289a8-1ea3-4559-8d70-cab87d8fd9d9\" (UID: \"5aa289a8-1ea3-4559-8d70-cab87d8fd9d9\") " Jan 28 12:45:26 crc kubenswrapper[4685]: I0128 12:45:26.798712 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5aa289a8-1ea3-4559-8d70-cab87d8fd9d9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5aa289a8-1ea3-4559-8d70-cab87d8fd9d9" (UID: "5aa289a8-1ea3-4559-8d70-cab87d8fd9d9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:45:26 crc kubenswrapper[4685]: I0128 12:45:26.899392 4685 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5aa289a8-1ea3-4559-8d70-cab87d8fd9d9-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:27 crc kubenswrapper[4685]: I0128 12:45:27.623396 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-9121-account-create-update-xl2qc" event={"ID":"5aa289a8-1ea3-4559-8d70-cab87d8fd9d9","Type":"ContainerDied","Data":"b71fd010e1c827e0d3432c6fbbcbbd4e516305c3f31b5ec897d43ac7a693c844"} Jan 28 12:45:27 crc kubenswrapper[4685]: I0128 12:45:27.623438 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b71fd010e1c827e0d3432c6fbbcbbd4e516305c3f31b5ec897d43ac7a693c844" Jan 28 12:45:27 crc kubenswrapper[4685]: I0128 12:45:27.623501 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-9121-account-create-update-xl2qc" Jan 28 12:45:27 crc kubenswrapper[4685]: I0128 12:45:27.625339 4685 generic.go:334] "Generic (PLEG): container finished" podID="1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8" containerID="c4b2c1830000e7470ac3781f6da8d877a4aee4629aee724c3f0736445baa6076" exitCode=0 Jan 28 12:45:27 crc kubenswrapper[4685]: I0128 12:45:27.625949 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-index-srqv2" event={"ID":"1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8","Type":"ContainerDied","Data":"c4b2c1830000e7470ac3781f6da8d877a4aee4629aee724c3f0736445baa6076"} Jan 28 12:45:27 crc kubenswrapper[4685]: I0128 12:45:27.835357 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5aa289a8-1ea3-4559-8d70-cab87d8fd9d9-kube-api-access-sr4hv" (OuterVolumeSpecName: "kube-api-access-sr4hv") pod "5aa289a8-1ea3-4559-8d70-cab87d8fd9d9" (UID: "5aa289a8-1ea3-4559-8d70-cab87d8fd9d9"). InnerVolumeSpecName "kube-api-access-sr4hv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:45:27 crc kubenswrapper[4685]: I0128 12:45:27.844613 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sr4hv\" (UniqueName: \"kubernetes.io/projected/5aa289a8-1ea3-4559-8d70-cab87d8fd9d9-kube-api-access-sr4hv\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:27 crc kubenswrapper[4685]: I0128 12:45:27.917895 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-index-srqv2" Jan 28 12:45:27 crc kubenswrapper[4685]: I0128 12:45:27.944886 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fc2qv\" (UniqueName: \"kubernetes.io/projected/1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8-kube-api-access-fc2qv\") pod \"1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8\" (UID: \"1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8\") " Jan 28 12:45:27 crc kubenswrapper[4685]: I0128 12:45:27.949387 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8-kube-api-access-fc2qv" (OuterVolumeSpecName: "kube-api-access-fc2qv") pod "1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8" (UID: "1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8"). InnerVolumeSpecName "kube-api-access-fc2qv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.046953 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fc2qv\" (UniqueName: \"kubernetes.io/projected/1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8-kube-api-access-fc2qv\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.391195 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/keystone-db-sync-tpztp"] Jan 28 12:45:28 crc kubenswrapper[4685]: E0128 12:45:28.391418 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5aa289a8-1ea3-4559-8d70-cab87d8fd9d9" containerName="mariadb-account-create-update" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.391429 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="5aa289a8-1ea3-4559-8d70-cab87d8fd9d9" containerName="mariadb-account-create-update" Jan 28 12:45:28 crc kubenswrapper[4685]: E0128 12:45:28.391438 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8" containerName="registry-server" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.391445 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8" containerName="registry-server" Jan 28 12:45:28 crc kubenswrapper[4685]: E0128 12:45:28.391468 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9126ced2-36b8-48c1-97e2-22a4d5b69ec9" containerName="mariadb-database-create" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.391474 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="9126ced2-36b8-48c1-97e2-22a4d5b69ec9" containerName="mariadb-database-create" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.391582 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="5aa289a8-1ea3-4559-8d70-cab87d8fd9d9" containerName="mariadb-account-create-update" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.391593 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8" containerName="registry-server" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.391605 4685 
memory_manager.go:354] "RemoveStaleState removing state" podUID="9126ced2-36b8-48c1-97e2-22a4d5b69ec9" containerName="mariadb-database-create" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.391983 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-db-sync-tpztp" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.394561 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone-keystone-dockercfg-pbxpr" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.394966 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.398614 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone-config-data" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.398959 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/keystone-db-sync-tpztp"] Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.402313 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone-scripts" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.452888 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rfkv\" (UniqueName: \"kubernetes.io/projected/4f458232-c94a-45fa-a093-f9d9d1d09541-kube-api-access-5rfkv\") pod \"keystone-db-sync-tpztp\" (UID: \"4f458232-c94a-45fa-a093-f9d9d1d09541\") " pod="glance-kuttl-tests/keystone-db-sync-tpztp" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.452938 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f458232-c94a-45fa-a093-f9d9d1d09541-config-data\") pod \"keystone-db-sync-tpztp\" (UID: \"4f458232-c94a-45fa-a093-f9d9d1d09541\") " pod="glance-kuttl-tests/keystone-db-sync-tpztp" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.553829 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rfkv\" (UniqueName: \"kubernetes.io/projected/4f458232-c94a-45fa-a093-f9d9d1d09541-kube-api-access-5rfkv\") pod \"keystone-db-sync-tpztp\" (UID: \"4f458232-c94a-45fa-a093-f9d9d1d09541\") " pod="glance-kuttl-tests/keystone-db-sync-tpztp" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.553893 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f458232-c94a-45fa-a093-f9d9d1d09541-config-data\") pod \"keystone-db-sync-tpztp\" (UID: \"4f458232-c94a-45fa-a093-f9d9d1d09541\") " pod="glance-kuttl-tests/keystone-db-sync-tpztp" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.560059 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f458232-c94a-45fa-a093-f9d9d1d09541-config-data\") pod \"keystone-db-sync-tpztp\" (UID: \"4f458232-c94a-45fa-a093-f9d9d1d09541\") " pod="glance-kuttl-tests/keystone-db-sync-tpztp" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.575675 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rfkv\" (UniqueName: \"kubernetes.io/projected/4f458232-c94a-45fa-a093-f9d9d1d09541-kube-api-access-5rfkv\") pod \"keystone-db-sync-tpztp\" (UID: \"4f458232-c94a-45fa-a093-f9d9d1d09541\") " pod="glance-kuttl-tests/keystone-db-sync-tpztp" Jan 28 
12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.633796 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-index-srqv2" event={"ID":"1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8","Type":"ContainerDied","Data":"187bb09cf036ecb465c6f57eda87be7695bf014c7b03f582fb21915d511060b7"} Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.633826 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-index-srqv2" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.635519 4685 scope.go:117] "RemoveContainer" containerID="c4b2c1830000e7470ac3781f6da8d877a4aee4629aee724c3f0736445baa6076" Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.660032 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/horizon-operator-index-srqv2"] Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.677531 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/horizon-operator-index-srqv2"] Jan 28 12:45:28 crc kubenswrapper[4685]: I0128 12:45:28.708445 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-db-sync-tpztp" Jan 28 12:45:29 crc kubenswrapper[4685]: I0128 12:45:29.112224 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/keystone-db-sync-tpztp"] Jan 28 12:45:29 crc kubenswrapper[4685]: I0128 12:45:29.642443 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-db-sync-tpztp" event={"ID":"4f458232-c94a-45fa-a093-f9d9d1d09541","Type":"ContainerStarted","Data":"685b8dd416bfe7517a7d7dedca29d7b4f30281398d9b55d689ce8e648e379ef2"} Jan 28 12:45:29 crc kubenswrapper[4685]: I0128 12:45:29.646033 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-index-97nxq" event={"ID":"fbf9a561-e551-4c1f-9ac1-ac930e8fc5ec","Type":"ContainerStarted","Data":"39af687046cc48fa4f1e9a3d0d194a9c0575ae70889a93c85e97e08be5f1e4c1"} Jan 28 12:45:29 crc kubenswrapper[4685]: I0128 12:45:29.666362 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-index-97nxq" podStartSLOduration=7.67563638 podStartE2EDuration="10.666330462s" podCreationTimestamp="2026-01-28 12:45:19 +0000 UTC" firstStartedPulling="2026-01-28 12:45:25.503043216 +0000 UTC m=+1476.590457051" lastFinishedPulling="2026-01-28 12:45:28.493737298 +0000 UTC m=+1479.581151133" observedRunningTime="2026-01-28 12:45:29.663680899 +0000 UTC m=+1480.751094744" watchObservedRunningTime="2026-01-28 12:45:29.666330462 +0000 UTC m=+1480.753744297" Jan 28 12:45:29 crc kubenswrapper[4685]: I0128 12:45:29.912766 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/horizon-operator-index-qjv59" Jan 28 12:45:29 crc kubenswrapper[4685]: I0128 12:45:29.912833 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-index-qjv59" Jan 28 12:45:29 crc kubenswrapper[4685]: I0128 12:45:29.944848 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/horizon-operator-index-qjv59" Jan 28 12:45:30 crc kubenswrapper[4685]: I0128 12:45:30.554781 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8" path="/var/lib/kubelet/pods/1b1ce3c1-a2eb-47a2-a440-b4a89046ebe8/volumes" Jan 28 12:45:36 crc kubenswrapper[4685]: I0128 
12:45:36.697032 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-db-sync-tpztp" event={"ID":"4f458232-c94a-45fa-a093-f9d9d1d09541","Type":"ContainerStarted","Data":"1c90c4c0180e4e7a42fe9c87dfe1f824b46c3fdee916c92ed93d10d3225a5d44"} Jan 28 12:45:36 crc kubenswrapper[4685]: I0128 12:45:36.715268 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/keystone-db-sync-tpztp" podStartSLOduration=2.045092672 podStartE2EDuration="8.715245619s" podCreationTimestamp="2026-01-28 12:45:28 +0000 UTC" firstStartedPulling="2026-01-28 12:45:29.122652726 +0000 UTC m=+1480.210066561" lastFinishedPulling="2026-01-28 12:45:35.792805673 +0000 UTC m=+1486.880219508" observedRunningTime="2026-01-28 12:45:36.71031481 +0000 UTC m=+1487.797728645" watchObservedRunningTime="2026-01-28 12:45:36.715245619 +0000 UTC m=+1487.802659454" Jan 28 12:45:39 crc kubenswrapper[4685]: I0128 12:45:39.516063 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-index-97nxq" Jan 28 12:45:39 crc kubenswrapper[4685]: I0128 12:45:39.516122 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/swift-operator-index-97nxq" Jan 28 12:45:39 crc kubenswrapper[4685]: I0128 12:45:39.545833 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/swift-operator-index-97nxq" Jan 28 12:45:39 crc kubenswrapper[4685]: I0128 12:45:39.738721 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-index-97nxq" Jan 28 12:45:39 crc kubenswrapper[4685]: I0128 12:45:39.937639 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-index-qjv59" Jan 28 12:45:40 crc kubenswrapper[4685]: I0128 12:45:40.720398 4685 generic.go:334] "Generic (PLEG): container finished" podID="4f458232-c94a-45fa-a093-f9d9d1d09541" containerID="1c90c4c0180e4e7a42fe9c87dfe1f824b46c3fdee916c92ed93d10d3225a5d44" exitCode=0 Jan 28 12:45:40 crc kubenswrapper[4685]: I0128 12:45:40.721320 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-db-sync-tpztp" event={"ID":"4f458232-c94a-45fa-a093-f9d9d1d09541","Type":"ContainerDied","Data":"1c90c4c0180e4e7a42fe9c87dfe1f824b46c3fdee916c92ed93d10d3225a5d44"} Jan 28 12:45:41 crc kubenswrapper[4685]: I0128 12:45:41.980224 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/keystone-db-sync-tpztp" Jan 28 12:45:42 crc kubenswrapper[4685]: I0128 12:45:42.077068 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5rfkv\" (UniqueName: \"kubernetes.io/projected/4f458232-c94a-45fa-a093-f9d9d1d09541-kube-api-access-5rfkv\") pod \"4f458232-c94a-45fa-a093-f9d9d1d09541\" (UID: \"4f458232-c94a-45fa-a093-f9d9d1d09541\") " Jan 28 12:45:42 crc kubenswrapper[4685]: I0128 12:45:42.077123 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f458232-c94a-45fa-a093-f9d9d1d09541-config-data\") pod \"4f458232-c94a-45fa-a093-f9d9d1d09541\" (UID: \"4f458232-c94a-45fa-a093-f9d9d1d09541\") " Jan 28 12:45:42 crc kubenswrapper[4685]: I0128 12:45:42.084410 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f458232-c94a-45fa-a093-f9d9d1d09541-kube-api-access-5rfkv" (OuterVolumeSpecName: "kube-api-access-5rfkv") pod "4f458232-c94a-45fa-a093-f9d9d1d09541" (UID: "4f458232-c94a-45fa-a093-f9d9d1d09541"). InnerVolumeSpecName "kube-api-access-5rfkv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:45:42 crc kubenswrapper[4685]: I0128 12:45:42.120883 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f458232-c94a-45fa-a093-f9d9d1d09541-config-data" (OuterVolumeSpecName: "config-data") pod "4f458232-c94a-45fa-a093-f9d9d1d09541" (UID: "4f458232-c94a-45fa-a093-f9d9d1d09541"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:45:42 crc kubenswrapper[4685]: I0128 12:45:42.178895 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5rfkv\" (UniqueName: \"kubernetes.io/projected/4f458232-c94a-45fa-a093-f9d9d1d09541-kube-api-access-5rfkv\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:42 crc kubenswrapper[4685]: I0128 12:45:42.178941 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f458232-c94a-45fa-a093-f9d9d1d09541-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:42 crc kubenswrapper[4685]: I0128 12:45:42.734293 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-db-sync-tpztp" event={"ID":"4f458232-c94a-45fa-a093-f9d9d1d09541","Type":"ContainerDied","Data":"685b8dd416bfe7517a7d7dedca29d7b4f30281398d9b55d689ce8e648e379ef2"} Jan 28 12:45:42 crc kubenswrapper[4685]: I0128 12:45:42.734653 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="685b8dd416bfe7517a7d7dedca29d7b4f30281398d9b55d689ce8e648e379ef2" Jan 28 12:45:42 crc kubenswrapper[4685]: I0128 12:45:42.734393 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/keystone-db-sync-tpztp" Jan 28 12:45:42 crc kubenswrapper[4685]: I0128 12:45:42.912470 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/keystone-bootstrap-sbvqt"] Jan 28 12:45:42 crc kubenswrapper[4685]: E0128 12:45:42.912855 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f458232-c94a-45fa-a093-f9d9d1d09541" containerName="keystone-db-sync" Jan 28 12:45:42 crc kubenswrapper[4685]: I0128 12:45:42.912880 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f458232-c94a-45fa-a093-f9d9d1d09541" containerName="keystone-db-sync" Jan 28 12:45:42 crc kubenswrapper[4685]: I0128 12:45:42.913033 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f458232-c94a-45fa-a093-f9d9d1d09541" containerName="keystone-db-sync" Jan 28 12:45:42 crc kubenswrapper[4685]: I0128 12:45:42.913582 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" Jan 28 12:45:42 crc kubenswrapper[4685]: I0128 12:45:42.915241 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone-keystone-dockercfg-pbxpr" Jan 28 12:45:42 crc kubenswrapper[4685]: I0128 12:45:42.915327 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"osp-secret" Jan 28 12:45:42 crc kubenswrapper[4685]: I0128 12:45:42.915256 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone-config-data" Jan 28 12:45:42 crc kubenswrapper[4685]: I0128 12:45:42.915633 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone" Jan 28 12:45:42 crc kubenswrapper[4685]: I0128 12:45:42.916894 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone-scripts" Jan 28 12:45:42 crc kubenswrapper[4685]: I0128 12:45:42.941810 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/keystone-bootstrap-sbvqt"] Jan 28 12:45:43 crc kubenswrapper[4685]: I0128 12:45:43.092280 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-credential-keys\") pod \"keystone-bootstrap-sbvqt\" (UID: \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\") " pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" Jan 28 12:45:43 crc kubenswrapper[4685]: I0128 12:45:43.092351 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-fernet-keys\") pod \"keystone-bootstrap-sbvqt\" (UID: \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\") " pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" Jan 28 12:45:43 crc kubenswrapper[4685]: I0128 12:45:43.092412 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-config-data\") pod \"keystone-bootstrap-sbvqt\" (UID: \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\") " pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" Jan 28 12:45:43 crc kubenswrapper[4685]: I0128 12:45:43.092465 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-scripts\") pod 
\"keystone-bootstrap-sbvqt\" (UID: \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\") " pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" Jan 28 12:45:43 crc kubenswrapper[4685]: I0128 12:45:43.092487 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfpbr\" (UniqueName: \"kubernetes.io/projected/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-kube-api-access-wfpbr\") pod \"keystone-bootstrap-sbvqt\" (UID: \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\") " pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" Jan 28 12:45:43 crc kubenswrapper[4685]: I0128 12:45:43.194205 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-credential-keys\") pod \"keystone-bootstrap-sbvqt\" (UID: \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\") " pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" Jan 28 12:45:43 crc kubenswrapper[4685]: I0128 12:45:43.194251 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-fernet-keys\") pod \"keystone-bootstrap-sbvqt\" (UID: \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\") " pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" Jan 28 12:45:43 crc kubenswrapper[4685]: I0128 12:45:43.194290 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-config-data\") pod \"keystone-bootstrap-sbvqt\" (UID: \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\") " pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" Jan 28 12:45:43 crc kubenswrapper[4685]: I0128 12:45:43.194361 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-scripts\") pod \"keystone-bootstrap-sbvqt\" (UID: \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\") " pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" Jan 28 12:45:43 crc kubenswrapper[4685]: I0128 12:45:43.194390 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfpbr\" (UniqueName: \"kubernetes.io/projected/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-kube-api-access-wfpbr\") pod \"keystone-bootstrap-sbvqt\" (UID: \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\") " pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" Jan 28 12:45:43 crc kubenswrapper[4685]: I0128 12:45:43.200252 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-fernet-keys\") pod \"keystone-bootstrap-sbvqt\" (UID: \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\") " pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" Jan 28 12:45:43 crc kubenswrapper[4685]: I0128 12:45:43.200506 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-config-data\") pod \"keystone-bootstrap-sbvqt\" (UID: \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\") " pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" Jan 28 12:45:43 crc kubenswrapper[4685]: I0128 12:45:43.200856 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-credential-keys\") pod \"keystone-bootstrap-sbvqt\" (UID: \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\") 
" pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" Jan 28 12:45:43 crc kubenswrapper[4685]: I0128 12:45:43.201205 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-scripts\") pod \"keystone-bootstrap-sbvqt\" (UID: \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\") " pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" Jan 28 12:45:43 crc kubenswrapper[4685]: I0128 12:45:43.217722 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfpbr\" (UniqueName: \"kubernetes.io/projected/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-kube-api-access-wfpbr\") pod \"keystone-bootstrap-sbvqt\" (UID: \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\") " pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" Jan 28 12:45:43 crc kubenswrapper[4685]: I0128 12:45:43.238711 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" Jan 28 12:45:43 crc kubenswrapper[4685]: I0128 12:45:43.726367 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/keystone-bootstrap-sbvqt"] Jan 28 12:45:43 crc kubenswrapper[4685]: W0128 12:45:43.734089 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2fc2e07_b32b_4b9c_b0ea_211e82f3d87a.slice/crio-bcb6e9c1ad276bd6950d2de83a2c177bb243eccfd1e539e4576c46bbedd99fe5 WatchSource:0}: Error finding container bcb6e9c1ad276bd6950d2de83a2c177bb243eccfd1e539e4576c46bbedd99fe5: Status 404 returned error can't find the container with id bcb6e9c1ad276bd6950d2de83a2c177bb243eccfd1e539e4576c46bbedd99fe5 Jan 28 12:45:43 crc kubenswrapper[4685]: I0128 12:45:43.742836 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" event={"ID":"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a","Type":"ContainerStarted","Data":"bcb6e9c1ad276bd6950d2de83a2c177bb243eccfd1e539e4576c46bbedd99fe5"} Jan 28 12:45:44 crc kubenswrapper[4685]: I0128 12:45:44.749529 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" event={"ID":"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a","Type":"ContainerStarted","Data":"1c403e054423ab204342fc1da39746eda0684a5e64958a3f8faf6a628c02add4"} Jan 28 12:45:44 crc kubenswrapper[4685]: I0128 12:45:44.773651 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" podStartSLOduration=2.77363259 podStartE2EDuration="2.77363259s" podCreationTimestamp="2026-01-28 12:45:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:45:44.769795731 +0000 UTC m=+1495.857209566" watchObservedRunningTime="2026-01-28 12:45:44.77363259 +0000 UTC m=+1495.861046425" Jan 28 12:45:47 crc kubenswrapper[4685]: I0128 12:45:47.770936 4685 generic.go:334] "Generic (PLEG): container finished" podID="a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a" containerID="1c403e054423ab204342fc1da39746eda0684a5e64958a3f8faf6a628c02add4" exitCode=0 Jan 28 12:45:47 crc kubenswrapper[4685]: I0128 12:45:47.771028 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" event={"ID":"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a","Type":"ContainerDied","Data":"1c403e054423ab204342fc1da39746eda0684a5e64958a3f8faf6a628c02add4"} Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 
12:45:49.036640 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.174736 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wfpbr\" (UniqueName: \"kubernetes.io/projected/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-kube-api-access-wfpbr\") pod \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\" (UID: \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\") " Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.174789 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-credential-keys\") pod \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\" (UID: \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\") " Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.174835 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-scripts\") pod \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\" (UID: \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\") " Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.174899 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-fernet-keys\") pod \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\" (UID: \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\") " Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.174941 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-config-data\") pod \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\" (UID: \"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a\") " Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.181228 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a" (UID: "a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.181983 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a" (UID: "a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.182280 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-scripts" (OuterVolumeSpecName: "scripts") pod "a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a" (UID: "a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.182974 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-kube-api-access-wfpbr" (OuterVolumeSpecName: "kube-api-access-wfpbr") pod "a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a" (UID: "a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a"). 
InnerVolumeSpecName "kube-api-access-wfpbr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.196399 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-config-data" (OuterVolumeSpecName: "config-data") pod "a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a" (UID: "a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.276673 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wfpbr\" (UniqueName: \"kubernetes.io/projected/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-kube-api-access-wfpbr\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.276725 4685 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.276738 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.276748 4685 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.276760 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.789613 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" event={"ID":"a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a","Type":"ContainerDied","Data":"bcb6e9c1ad276bd6950d2de83a2c177bb243eccfd1e539e4576c46bbedd99fe5"} Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.790211 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bcb6e9c1ad276bd6950d2de83a2c177bb243eccfd1e539e4576c46bbedd99fe5" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.789708 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-bootstrap-sbvqt" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.850243 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j"] Jan 28 12:45:49 crc kubenswrapper[4685]: E0128 12:45:49.850581 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a" containerName="keystone-bootstrap" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.850598 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a" containerName="keystone-bootstrap" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.850869 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a" containerName="keystone-bootstrap" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.851962 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.854557 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-dvgnb" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.858138 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j"] Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.895651 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/keystone-784968dcbc-drlnm"] Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.897133 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.899486 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone-scripts" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.899635 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone-keystone-dockercfg-pbxpr" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.900086 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.900351 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"keystone-config-data" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.903726 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/keystone-784968dcbc-drlnm"] Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.988390 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a6f26466-01ba-4d9b-9e8e-80baf6e27ea7-util\") pod \"56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j\" (UID: \"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7\") " pod="openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.988487 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a6f26466-01ba-4d9b-9e8e-80baf6e27ea7-bundle\") pod \"56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j\" (UID: \"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7\") " pod="openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.988521 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/782c1b4b-61bc-4cdb-8dc6-421d29d6b874-fernet-keys\") pod \"keystone-784968dcbc-drlnm\" (UID: \"782c1b4b-61bc-4cdb-8dc6-421d29d6b874\") " pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.988546 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9fth\" (UniqueName: \"kubernetes.io/projected/a6f26466-01ba-4d9b-9e8e-80baf6e27ea7-kube-api-access-s9fth\") pod \"56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j\" (UID: \"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7\") " 
pod="openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.988579 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/782c1b4b-61bc-4cdb-8dc6-421d29d6b874-scripts\") pod \"keystone-784968dcbc-drlnm\" (UID: \"782c1b4b-61bc-4cdb-8dc6-421d29d6b874\") " pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.988599 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/782c1b4b-61bc-4cdb-8dc6-421d29d6b874-credential-keys\") pod \"keystone-784968dcbc-drlnm\" (UID: \"782c1b4b-61bc-4cdb-8dc6-421d29d6b874\") " pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.988638 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/782c1b4b-61bc-4cdb-8dc6-421d29d6b874-config-data\") pod \"keystone-784968dcbc-drlnm\" (UID: \"782c1b4b-61bc-4cdb-8dc6-421d29d6b874\") " pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" Jan 28 12:45:49 crc kubenswrapper[4685]: I0128 12:45:49.988676 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdnxs\" (UniqueName: \"kubernetes.io/projected/782c1b4b-61bc-4cdb-8dc6-421d29d6b874-kube-api-access-vdnxs\") pod \"keystone-784968dcbc-drlnm\" (UID: \"782c1b4b-61bc-4cdb-8dc6-421d29d6b874\") " pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.090425 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a6f26466-01ba-4d9b-9e8e-80baf6e27ea7-bundle\") pod \"56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j\" (UID: \"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7\") " pod="openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j" Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.090475 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/782c1b4b-61bc-4cdb-8dc6-421d29d6b874-fernet-keys\") pod \"keystone-784968dcbc-drlnm\" (UID: \"782c1b4b-61bc-4cdb-8dc6-421d29d6b874\") " pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.090496 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9fth\" (UniqueName: \"kubernetes.io/projected/a6f26466-01ba-4d9b-9e8e-80baf6e27ea7-kube-api-access-s9fth\") pod \"56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j\" (UID: \"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7\") " pod="openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j" Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.090532 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/782c1b4b-61bc-4cdb-8dc6-421d29d6b874-scripts\") pod \"keystone-784968dcbc-drlnm\" (UID: \"782c1b4b-61bc-4cdb-8dc6-421d29d6b874\") " pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.090554 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/782c1b4b-61bc-4cdb-8dc6-421d29d6b874-credential-keys\") pod \"keystone-784968dcbc-drlnm\" (UID: \"782c1b4b-61bc-4cdb-8dc6-421d29d6b874\") " pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.090602 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/782c1b4b-61bc-4cdb-8dc6-421d29d6b874-config-data\") pod \"keystone-784968dcbc-drlnm\" (UID: \"782c1b4b-61bc-4cdb-8dc6-421d29d6b874\") " pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.090663 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdnxs\" (UniqueName: \"kubernetes.io/projected/782c1b4b-61bc-4cdb-8dc6-421d29d6b874-kube-api-access-vdnxs\") pod \"keystone-784968dcbc-drlnm\" (UID: \"782c1b4b-61bc-4cdb-8dc6-421d29d6b874\") " pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.090689 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a6f26466-01ba-4d9b-9e8e-80baf6e27ea7-util\") pod \"56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j\" (UID: \"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7\") " pod="openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j" Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.090965 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a6f26466-01ba-4d9b-9e8e-80baf6e27ea7-bundle\") pod \"56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j\" (UID: \"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7\") " pod="openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j" Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.091041 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a6f26466-01ba-4d9b-9e8e-80baf6e27ea7-util\") pod \"56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j\" (UID: \"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7\") " pod="openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j" Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.094976 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/782c1b4b-61bc-4cdb-8dc6-421d29d6b874-scripts\") pod \"keystone-784968dcbc-drlnm\" (UID: \"782c1b4b-61bc-4cdb-8dc6-421d29d6b874\") " pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.095264 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/782c1b4b-61bc-4cdb-8dc6-421d29d6b874-fernet-keys\") pod \"keystone-784968dcbc-drlnm\" (UID: \"782c1b4b-61bc-4cdb-8dc6-421d29d6b874\") " pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.095321 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/782c1b4b-61bc-4cdb-8dc6-421d29d6b874-config-data\") pod \"keystone-784968dcbc-drlnm\" (UID: \"782c1b4b-61bc-4cdb-8dc6-421d29d6b874\") " pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 
12:45:50.101792 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/782c1b4b-61bc-4cdb-8dc6-421d29d6b874-credential-keys\") pod \"keystone-784968dcbc-drlnm\" (UID: \"782c1b4b-61bc-4cdb-8dc6-421d29d6b874\") " pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.110552 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdnxs\" (UniqueName: \"kubernetes.io/projected/782c1b4b-61bc-4cdb-8dc6-421d29d6b874-kube-api-access-vdnxs\") pod \"keystone-784968dcbc-drlnm\" (UID: \"782c1b4b-61bc-4cdb-8dc6-421d29d6b874\") " pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.112312 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9fth\" (UniqueName: \"kubernetes.io/projected/a6f26466-01ba-4d9b-9e8e-80baf6e27ea7-kube-api-access-s9fth\") pod \"56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j\" (UID: \"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7\") " pod="openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j" Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.165916 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j" Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.213746 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.578904 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j"] Jan 28 12:45:50 crc kubenswrapper[4685]: W0128 12:45:50.581342 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda6f26466_01ba_4d9b_9e8e_80baf6e27ea7.slice/crio-b101f8eeba8f12461e52dadd95172017c9621750ead57eba261295be25ba8926 WatchSource:0}: Error finding container b101f8eeba8f12461e52dadd95172017c9621750ead57eba261295be25ba8926: Status 404 returned error can't find the container with id b101f8eeba8f12461e52dadd95172017c9621750ead57eba261295be25ba8926 Jan 28 12:45:50 crc kubenswrapper[4685]: W0128 12:45:50.715002 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod782c1b4b_61bc_4cdb_8dc6_421d29d6b874.slice/crio-64d5d8ebf931d991ebfaee23020b471f106243f0e4d4debc60021d3b15a429c6 WatchSource:0}: Error finding container 64d5d8ebf931d991ebfaee23020b471f106243f0e4d4debc60021d3b15a429c6: Status 404 returned error can't find the container with id 64d5d8ebf931d991ebfaee23020b471f106243f0e4d4debc60021d3b15a429c6 Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.715619 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/keystone-784968dcbc-drlnm"] Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.799812 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j" event={"ID":"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7","Type":"ContainerStarted","Data":"403b6e85be3a43787983317ea6f96cd1a1e61f2913a52ab10a90506741eaa30b"} Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.799868 4685 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j" event={"ID":"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7","Type":"ContainerStarted","Data":"b101f8eeba8f12461e52dadd95172017c9621750ead57eba261295be25ba8926"} Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.803235 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" event={"ID":"782c1b4b-61bc-4cdb-8dc6-421d29d6b874","Type":"ContainerStarted","Data":"64d5d8ebf931d991ebfaee23020b471f106243f0e4d4debc60021d3b15a429c6"} Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.840811 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2"] Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.842054 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2" Jan 28 12:45:50 crc kubenswrapper[4685]: I0128 12:45:50.850299 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2"] Jan 28 12:45:51 crc kubenswrapper[4685]: I0128 12:45:51.014652 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/79936f73-f97a-4c8d-ba46-ad531e8ed560-bundle\") pod \"920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2\" (UID: \"79936f73-f97a-4c8d-ba46-ad531e8ed560\") " pod="openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2" Jan 28 12:45:51 crc kubenswrapper[4685]: I0128 12:45:51.014813 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/79936f73-f97a-4c8d-ba46-ad531e8ed560-util\") pod \"920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2\" (UID: \"79936f73-f97a-4c8d-ba46-ad531e8ed560\") " pod="openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2" Jan 28 12:45:51 crc kubenswrapper[4685]: I0128 12:45:51.014844 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqzct\" (UniqueName: \"kubernetes.io/projected/79936f73-f97a-4c8d-ba46-ad531e8ed560-kube-api-access-vqzct\") pod \"920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2\" (UID: \"79936f73-f97a-4c8d-ba46-ad531e8ed560\") " pod="openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2" Jan 28 12:45:51 crc kubenswrapper[4685]: I0128 12:45:51.116429 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqzct\" (UniqueName: \"kubernetes.io/projected/79936f73-f97a-4c8d-ba46-ad531e8ed560-kube-api-access-vqzct\") pod \"920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2\" (UID: \"79936f73-f97a-4c8d-ba46-ad531e8ed560\") " pod="openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2" Jan 28 12:45:51 crc kubenswrapper[4685]: I0128 12:45:51.116479 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/79936f73-f97a-4c8d-ba46-ad531e8ed560-util\") pod \"920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2\" (UID: \"79936f73-f97a-4c8d-ba46-ad531e8ed560\") " 
pod="openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2" Jan 28 12:45:51 crc kubenswrapper[4685]: I0128 12:45:51.116521 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/79936f73-f97a-4c8d-ba46-ad531e8ed560-bundle\") pod \"920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2\" (UID: \"79936f73-f97a-4c8d-ba46-ad531e8ed560\") " pod="openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2" Jan 28 12:45:51 crc kubenswrapper[4685]: I0128 12:45:51.117015 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/79936f73-f97a-4c8d-ba46-ad531e8ed560-bundle\") pod \"920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2\" (UID: \"79936f73-f97a-4c8d-ba46-ad531e8ed560\") " pod="openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2" Jan 28 12:45:51 crc kubenswrapper[4685]: I0128 12:45:51.117099 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/79936f73-f97a-4c8d-ba46-ad531e8ed560-util\") pod \"920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2\" (UID: \"79936f73-f97a-4c8d-ba46-ad531e8ed560\") " pod="openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2" Jan 28 12:45:51 crc kubenswrapper[4685]: I0128 12:45:51.134811 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqzct\" (UniqueName: \"kubernetes.io/projected/79936f73-f97a-4c8d-ba46-ad531e8ed560-kube-api-access-vqzct\") pod \"920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2\" (UID: \"79936f73-f97a-4c8d-ba46-ad531e8ed560\") " pod="openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2" Jan 28 12:45:51 crc kubenswrapper[4685]: I0128 12:45:51.159926 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2" Jan 28 12:45:51 crc kubenswrapper[4685]: I0128 12:45:51.581086 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2"] Jan 28 12:45:51 crc kubenswrapper[4685]: I0128 12:45:51.810736 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2" event={"ID":"79936f73-f97a-4c8d-ba46-ad531e8ed560","Type":"ContainerStarted","Data":"11deb5e9c794947022dc7613213d738bf709f7812d24ab44c42fa85177d013ff"} Jan 28 12:45:51 crc kubenswrapper[4685]: I0128 12:45:51.811130 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2" event={"ID":"79936f73-f97a-4c8d-ba46-ad531e8ed560","Type":"ContainerStarted","Data":"cd19c5a249f1da5817ae4dd51112f36025b8bc71f4042642f3a0ae68be7ca4e3"} Jan 28 12:45:51 crc kubenswrapper[4685]: I0128 12:45:51.814230 4685 generic.go:334] "Generic (PLEG): container finished" podID="a6f26466-01ba-4d9b-9e8e-80baf6e27ea7" containerID="403b6e85be3a43787983317ea6f96cd1a1e61f2913a52ab10a90506741eaa30b" exitCode=0 Jan 28 12:45:51 crc kubenswrapper[4685]: I0128 12:45:51.814305 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j" event={"ID":"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7","Type":"ContainerDied","Data":"403b6e85be3a43787983317ea6f96cd1a1e61f2913a52ab10a90506741eaa30b"} Jan 28 12:45:51 crc kubenswrapper[4685]: I0128 12:45:51.816562 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" event={"ID":"782c1b4b-61bc-4cdb-8dc6-421d29d6b874","Type":"ContainerStarted","Data":"62ecae471ee46b48fb8edee703a477382c548085b153dde55bb20b2eaf486261"} Jan 28 12:45:51 crc kubenswrapper[4685]: I0128 12:45:51.816948 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" Jan 28 12:45:51 crc kubenswrapper[4685]: I0128 12:45:51.856120 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" podStartSLOduration=2.8560967 podStartE2EDuration="2.8560967s" podCreationTimestamp="2026-01-28 12:45:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:45:51.851243193 +0000 UTC m=+1502.938657048" watchObservedRunningTime="2026-01-28 12:45:51.8560967 +0000 UTC m=+1502.943510535" Jan 28 12:45:52 crc kubenswrapper[4685]: I0128 12:45:52.824399 4685 generic.go:334] "Generic (PLEG): container finished" podID="79936f73-f97a-4c8d-ba46-ad531e8ed560" containerID="11deb5e9c794947022dc7613213d738bf709f7812d24ab44c42fa85177d013ff" exitCode=0 Jan 28 12:45:52 crc kubenswrapper[4685]: I0128 12:45:52.824515 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2" event={"ID":"79936f73-f97a-4c8d-ba46-ad531e8ed560","Type":"ContainerDied","Data":"11deb5e9c794947022dc7613213d738bf709f7812d24ab44c42fa85177d013ff"} Jan 28 12:45:52 crc kubenswrapper[4685]: I0128 12:45:52.830960 4685 generic.go:334] "Generic (PLEG): container finished" podID="a6f26466-01ba-4d9b-9e8e-80baf6e27ea7" 
containerID="9b5567c28e769772f33af3e076a8c5b790a9025a40249a872f3e8cb0843e0ee7" exitCode=0 Jan 28 12:45:52 crc kubenswrapper[4685]: I0128 12:45:52.831275 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j" event={"ID":"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7","Type":"ContainerDied","Data":"9b5567c28e769772f33af3e076a8c5b790a9025a40249a872f3e8cb0843e0ee7"} Jan 28 12:45:53 crc kubenswrapper[4685]: I0128 12:45:53.842633 4685 generic.go:334] "Generic (PLEG): container finished" podID="a6f26466-01ba-4d9b-9e8e-80baf6e27ea7" containerID="f33e03fc4e54db3cbfab9efa17622e82024b54f5e18d60fa1d0212deac9d10f2" exitCode=0 Jan 28 12:45:53 crc kubenswrapper[4685]: I0128 12:45:53.842692 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j" event={"ID":"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7","Type":"ContainerDied","Data":"f33e03fc4e54db3cbfab9efa17622e82024b54f5e18d60fa1d0212deac9d10f2"} Jan 28 12:45:55 crc kubenswrapper[4685]: I0128 12:45:55.160226 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j" Jan 28 12:45:55 crc kubenswrapper[4685]: I0128 12:45:55.290794 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9fth\" (UniqueName: \"kubernetes.io/projected/a6f26466-01ba-4d9b-9e8e-80baf6e27ea7-kube-api-access-s9fth\") pod \"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7\" (UID: \"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7\") " Jan 28 12:45:55 crc kubenswrapper[4685]: I0128 12:45:55.290900 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a6f26466-01ba-4d9b-9e8e-80baf6e27ea7-bundle\") pod \"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7\" (UID: \"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7\") " Jan 28 12:45:55 crc kubenswrapper[4685]: I0128 12:45:55.291070 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a6f26466-01ba-4d9b-9e8e-80baf6e27ea7-util\") pod \"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7\" (UID: \"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7\") " Jan 28 12:45:55 crc kubenswrapper[4685]: I0128 12:45:55.293398 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6f26466-01ba-4d9b-9e8e-80baf6e27ea7-bundle" (OuterVolumeSpecName: "bundle") pod "a6f26466-01ba-4d9b-9e8e-80baf6e27ea7" (UID: "a6f26466-01ba-4d9b-9e8e-80baf6e27ea7"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:45:55 crc kubenswrapper[4685]: I0128 12:45:55.298612 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6f26466-01ba-4d9b-9e8e-80baf6e27ea7-kube-api-access-s9fth" (OuterVolumeSpecName: "kube-api-access-s9fth") pod "a6f26466-01ba-4d9b-9e8e-80baf6e27ea7" (UID: "a6f26466-01ba-4d9b-9e8e-80baf6e27ea7"). InnerVolumeSpecName "kube-api-access-s9fth". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:45:55 crc kubenswrapper[4685]: I0128 12:45:55.309551 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6f26466-01ba-4d9b-9e8e-80baf6e27ea7-util" (OuterVolumeSpecName: "util") pod "a6f26466-01ba-4d9b-9e8e-80baf6e27ea7" (UID: "a6f26466-01ba-4d9b-9e8e-80baf6e27ea7"). 
InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:45:55 crc kubenswrapper[4685]: I0128 12:45:55.393325 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s9fth\" (UniqueName: \"kubernetes.io/projected/a6f26466-01ba-4d9b-9e8e-80baf6e27ea7-kube-api-access-s9fth\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:55 crc kubenswrapper[4685]: I0128 12:45:55.393363 4685 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a6f26466-01ba-4d9b-9e8e-80baf6e27ea7-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:55 crc kubenswrapper[4685]: I0128 12:45:55.393397 4685 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a6f26466-01ba-4d9b-9e8e-80baf6e27ea7-util\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:55 crc kubenswrapper[4685]: I0128 12:45:55.859126 4685 generic.go:334] "Generic (PLEG): container finished" podID="79936f73-f97a-4c8d-ba46-ad531e8ed560" containerID="7db37f70531c35090c8a904d4e41647b4b64ae0afd05295d380c01c29663e10f" exitCode=0 Jan 28 12:45:55 crc kubenswrapper[4685]: I0128 12:45:55.859193 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2" event={"ID":"79936f73-f97a-4c8d-ba46-ad531e8ed560","Type":"ContainerDied","Data":"7db37f70531c35090c8a904d4e41647b4b64ae0afd05295d380c01c29663e10f"} Jan 28 12:45:55 crc kubenswrapper[4685]: I0128 12:45:55.861656 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j" event={"ID":"a6f26466-01ba-4d9b-9e8e-80baf6e27ea7","Type":"ContainerDied","Data":"b101f8eeba8f12461e52dadd95172017c9621750ead57eba261295be25ba8926"} Jan 28 12:45:55 crc kubenswrapper[4685]: I0128 12:45:55.861697 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j" Jan 28 12:45:55 crc kubenswrapper[4685]: I0128 12:45:55.861706 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b101f8eeba8f12461e52dadd95172017c9621750ead57eba261295be25ba8926" Jan 28 12:45:56 crc kubenswrapper[4685]: I0128 12:45:56.872990 4685 generic.go:334] "Generic (PLEG): container finished" podID="79936f73-f97a-4c8d-ba46-ad531e8ed560" containerID="62b868b410f7d18ef37f08a3d98e678e674518ea9d86d0cec0074df64827fc83" exitCode=0 Jan 28 12:45:56 crc kubenswrapper[4685]: I0128 12:45:56.873061 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2" event={"ID":"79936f73-f97a-4c8d-ba46-ad531e8ed560","Type":"ContainerDied","Data":"62b868b410f7d18ef37f08a3d98e678e674518ea9d86d0cec0074df64827fc83"} Jan 28 12:45:58 crc kubenswrapper[4685]: I0128 12:45:58.125207 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2" Jan 28 12:45:58 crc kubenswrapper[4685]: I0128 12:45:58.233683 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vqzct\" (UniqueName: \"kubernetes.io/projected/79936f73-f97a-4c8d-ba46-ad531e8ed560-kube-api-access-vqzct\") pod \"79936f73-f97a-4c8d-ba46-ad531e8ed560\" (UID: \"79936f73-f97a-4c8d-ba46-ad531e8ed560\") " Jan 28 12:45:58 crc kubenswrapper[4685]: I0128 12:45:58.233801 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/79936f73-f97a-4c8d-ba46-ad531e8ed560-bundle\") pod \"79936f73-f97a-4c8d-ba46-ad531e8ed560\" (UID: \"79936f73-f97a-4c8d-ba46-ad531e8ed560\") " Jan 28 12:45:58 crc kubenswrapper[4685]: I0128 12:45:58.233897 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/79936f73-f97a-4c8d-ba46-ad531e8ed560-util\") pod \"79936f73-f97a-4c8d-ba46-ad531e8ed560\" (UID: \"79936f73-f97a-4c8d-ba46-ad531e8ed560\") " Jan 28 12:45:58 crc kubenswrapper[4685]: I0128 12:45:58.234699 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79936f73-f97a-4c8d-ba46-ad531e8ed560-bundle" (OuterVolumeSpecName: "bundle") pod "79936f73-f97a-4c8d-ba46-ad531e8ed560" (UID: "79936f73-f97a-4c8d-ba46-ad531e8ed560"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:45:58 crc kubenswrapper[4685]: I0128 12:45:58.241990 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79936f73-f97a-4c8d-ba46-ad531e8ed560-kube-api-access-vqzct" (OuterVolumeSpecName: "kube-api-access-vqzct") pod "79936f73-f97a-4c8d-ba46-ad531e8ed560" (UID: "79936f73-f97a-4c8d-ba46-ad531e8ed560"). InnerVolumeSpecName "kube-api-access-vqzct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:45:58 crc kubenswrapper[4685]: I0128 12:45:58.246567 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79936f73-f97a-4c8d-ba46-ad531e8ed560-util" (OuterVolumeSpecName: "util") pod "79936f73-f97a-4c8d-ba46-ad531e8ed560" (UID: "79936f73-f97a-4c8d-ba46-ad531e8ed560"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:45:58 crc kubenswrapper[4685]: I0128 12:45:58.335251 4685 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/79936f73-f97a-4c8d-ba46-ad531e8ed560-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:58 crc kubenswrapper[4685]: I0128 12:45:58.335294 4685 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/79936f73-f97a-4c8d-ba46-ad531e8ed560-util\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:58 crc kubenswrapper[4685]: I0128 12:45:58.335307 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vqzct\" (UniqueName: \"kubernetes.io/projected/79936f73-f97a-4c8d-ba46-ad531e8ed560-kube-api-access-vqzct\") on node \"crc\" DevicePath \"\"" Jan 28 12:45:58 crc kubenswrapper[4685]: I0128 12:45:58.892389 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2" event={"ID":"79936f73-f97a-4c8d-ba46-ad531e8ed560","Type":"ContainerDied","Data":"cd19c5a249f1da5817ae4dd51112f36025b8bc71f4042642f3a0ae68be7ca4e3"} Jan 28 12:45:58 crc kubenswrapper[4685]: I0128 12:45:58.892878 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cd19c5a249f1da5817ae4dd51112f36025b8bc71f4042642f3a0ae68be7ca4e3" Jan 28 12:45:58 crc kubenswrapper[4685]: I0128 12:45:58.892500 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2" Jan 28 12:46:08 crc kubenswrapper[4685]: I0128 12:46:08.905510 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68f7444d68-zkgvn"] Jan 28 12:46:08 crc kubenswrapper[4685]: E0128 12:46:08.906360 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6f26466-01ba-4d9b-9e8e-80baf6e27ea7" containerName="pull" Jan 28 12:46:08 crc kubenswrapper[4685]: I0128 12:46:08.906380 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6f26466-01ba-4d9b-9e8e-80baf6e27ea7" containerName="pull" Jan 28 12:46:08 crc kubenswrapper[4685]: E0128 12:46:08.906391 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79936f73-f97a-4c8d-ba46-ad531e8ed560" containerName="pull" Jan 28 12:46:08 crc kubenswrapper[4685]: I0128 12:46:08.906398 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="79936f73-f97a-4c8d-ba46-ad531e8ed560" containerName="pull" Jan 28 12:46:08 crc kubenswrapper[4685]: E0128 12:46:08.906414 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79936f73-f97a-4c8d-ba46-ad531e8ed560" containerName="util" Jan 28 12:46:08 crc kubenswrapper[4685]: I0128 12:46:08.906421 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="79936f73-f97a-4c8d-ba46-ad531e8ed560" containerName="util" Jan 28 12:46:08 crc kubenswrapper[4685]: E0128 12:46:08.906437 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79936f73-f97a-4c8d-ba46-ad531e8ed560" containerName="extract" Jan 28 12:46:08 crc kubenswrapper[4685]: I0128 12:46:08.906446 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="79936f73-f97a-4c8d-ba46-ad531e8ed560" containerName="extract" Jan 28 12:46:08 crc kubenswrapper[4685]: E0128 12:46:08.906460 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6f26466-01ba-4d9b-9e8e-80baf6e27ea7" containerName="extract" Jan 28 
Jan 28 12:46:08 crc kubenswrapper[4685]: I0128 12:46:08.906468 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6f26466-01ba-4d9b-9e8e-80baf6e27ea7" containerName="extract"
Jan 28 12:46:08 crc kubenswrapper[4685]: E0128 12:46:08.906481 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6f26466-01ba-4d9b-9e8e-80baf6e27ea7" containerName="util"
Jan 28 12:46:08 crc kubenswrapper[4685]: I0128 12:46:08.906488 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6f26466-01ba-4d9b-9e8e-80baf6e27ea7" containerName="util"
Jan 28 12:46:08 crc kubenswrapper[4685]: I0128 12:46:08.906637 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="79936f73-f97a-4c8d-ba46-ad531e8ed560" containerName="extract"
Jan 28 12:46:08 crc kubenswrapper[4685]: I0128 12:46:08.906656 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6f26466-01ba-4d9b-9e8e-80baf6e27ea7" containerName="extract"
Jan 28 12:46:08 crc kubenswrapper[4685]: I0128 12:46:08.907239 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68f7444d68-zkgvn"
Jan 28 12:46:08 crc kubenswrapper[4685]: I0128 12:46:08.909068 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-sdj7h"
Jan 28 12:46:08 crc kubenswrapper[4685]: I0128 12:46:08.909068 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-service-cert"
Jan 28 12:46:08 crc kubenswrapper[4685]: I0128 12:46:08.919511 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68f7444d68-zkgvn"]
Jan 28 12:46:09 crc kubenswrapper[4685]: I0128 12:46:09.082392 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c1ad1a61-ac72-41ad-8bce-8ac55acf382e-apiservice-cert\") pod \"horizon-operator-controller-manager-68f7444d68-zkgvn\" (UID: \"c1ad1a61-ac72-41ad-8bce-8ac55acf382e\") " pod="openstack-operators/horizon-operator-controller-manager-68f7444d68-zkgvn"
Jan 28 12:46:09 crc kubenswrapper[4685]: I0128 12:46:09.082440 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56kd5\" (UniqueName: \"kubernetes.io/projected/c1ad1a61-ac72-41ad-8bce-8ac55acf382e-kube-api-access-56kd5\") pod \"horizon-operator-controller-manager-68f7444d68-zkgvn\" (UID: \"c1ad1a61-ac72-41ad-8bce-8ac55acf382e\") " pod="openstack-operators/horizon-operator-controller-manager-68f7444d68-zkgvn"
Jan 28 12:46:09 crc kubenswrapper[4685]: I0128 12:46:09.082523 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c1ad1a61-ac72-41ad-8bce-8ac55acf382e-webhook-cert\") pod \"horizon-operator-controller-manager-68f7444d68-zkgvn\" (UID: \"c1ad1a61-ac72-41ad-8bce-8ac55acf382e\") " pod="openstack-operators/horizon-operator-controller-manager-68f7444d68-zkgvn"
Jan 28 12:46:09 crc kubenswrapper[4685]: I0128 12:46:09.183583 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c1ad1a61-ac72-41ad-8bce-8ac55acf382e-apiservice-cert\") pod \"horizon-operator-controller-manager-68f7444d68-zkgvn\" (UID: \"c1ad1a61-ac72-41ad-8bce-8ac55acf382e\") " pod="openstack-operators/horizon-operator-controller-manager-68f7444d68-zkgvn"
Jan 28 12:46:09 crc kubenswrapper[4685]: I0128 12:46:09.183632 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56kd5\" (UniqueName: \"kubernetes.io/projected/c1ad1a61-ac72-41ad-8bce-8ac55acf382e-kube-api-access-56kd5\") pod \"horizon-operator-controller-manager-68f7444d68-zkgvn\" (UID: \"c1ad1a61-ac72-41ad-8bce-8ac55acf382e\") " pod="openstack-operators/horizon-operator-controller-manager-68f7444d68-zkgvn"
Jan 28 12:46:09 crc kubenswrapper[4685]: I0128 12:46:09.183688 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c1ad1a61-ac72-41ad-8bce-8ac55acf382e-webhook-cert\") pod \"horizon-operator-controller-manager-68f7444d68-zkgvn\" (UID: \"c1ad1a61-ac72-41ad-8bce-8ac55acf382e\") " pod="openstack-operators/horizon-operator-controller-manager-68f7444d68-zkgvn"
Jan 28 12:46:09 crc kubenswrapper[4685]: I0128 12:46:09.189172 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c1ad1a61-ac72-41ad-8bce-8ac55acf382e-webhook-cert\") pod \"horizon-operator-controller-manager-68f7444d68-zkgvn\" (UID: \"c1ad1a61-ac72-41ad-8bce-8ac55acf382e\") " pod="openstack-operators/horizon-operator-controller-manager-68f7444d68-zkgvn"
Jan 28 12:46:09 crc kubenswrapper[4685]: I0128 12:46:09.189662 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c1ad1a61-ac72-41ad-8bce-8ac55acf382e-apiservice-cert\") pod \"horizon-operator-controller-manager-68f7444d68-zkgvn\" (UID: \"c1ad1a61-ac72-41ad-8bce-8ac55acf382e\") " pod="openstack-operators/horizon-operator-controller-manager-68f7444d68-zkgvn"
Jan 28 12:46:09 crc kubenswrapper[4685]: I0128 12:46:09.201781 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56kd5\" (UniqueName: \"kubernetes.io/projected/c1ad1a61-ac72-41ad-8bce-8ac55acf382e-kube-api-access-56kd5\") pod \"horizon-operator-controller-manager-68f7444d68-zkgvn\" (UID: \"c1ad1a61-ac72-41ad-8bce-8ac55acf382e\") " pod="openstack-operators/horizon-operator-controller-manager-68f7444d68-zkgvn"
Jan 28 12:46:09 crc kubenswrapper[4685]: I0128 12:46:09.229514 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68f7444d68-zkgvn"
Jan 28 12:46:09 crc kubenswrapper[4685]: I0128 12:46:09.671992 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68f7444d68-zkgvn"]
Jan 28 12:46:09 crc kubenswrapper[4685]: W0128 12:46:09.685400 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc1ad1a61_ac72_41ad_8bce_8ac55acf382e.slice/crio-7272e7950beecb6a30bdc7299d2806d9a594ca5894aba89ce0448cc9771a4988 WatchSource:0}: Error finding container 7272e7950beecb6a30bdc7299d2806d9a594ca5894aba89ce0448cc9771a4988: Status 404 returned error can't find the container with id 7272e7950beecb6a30bdc7299d2806d9a594ca5894aba89ce0448cc9771a4988
Jan 28 12:46:09 crc kubenswrapper[4685]: I0128 12:46:09.962399 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68f7444d68-zkgvn" event={"ID":"c1ad1a61-ac72-41ad-8bce-8ac55acf382e","Type":"ContainerStarted","Data":"7272e7950beecb6a30bdc7299d2806d9a594ca5894aba89ce0448cc9771a4988"}
Jan 28 12:46:12 crc kubenswrapper[4685]: I0128 12:46:12.988143 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68f7444d68-zkgvn" event={"ID":"c1ad1a61-ac72-41ad-8bce-8ac55acf382e","Type":"ContainerStarted","Data":"0ab39ad69fa1ba69c50afc70f7af044cc59f7c7bea823910ac04df59680881f2"}
Jan 28 12:46:12 crc kubenswrapper[4685]: I0128 12:46:12.988483 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68f7444d68-zkgvn"
Jan 28 12:46:13 crc kubenswrapper[4685]: I0128 12:46:13.020247 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68f7444d68-zkgvn" podStartSLOduration=2.685598418 podStartE2EDuration="5.020160627s" podCreationTimestamp="2026-01-28 12:46:08 +0000 UTC" firstStartedPulling="2026-01-28 12:46:09.687428001 +0000 UTC m=+1520.774841836" lastFinishedPulling="2026-01-28 12:46:12.02199019 +0000 UTC m=+1523.109404045" observedRunningTime="2026-01-28 12:46:13.012794279 +0000 UTC m=+1524.100208124" watchObservedRunningTime="2026-01-28 12:46:13.020160627 +0000 UTC m=+1524.107574462"
Jan 28 12:46:17 crc kubenswrapper[4685]: I0128 12:46:17.153400 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-c8fb945fd-4b7jn"]
Jan 28 12:46:17 crc kubenswrapper[4685]: I0128 12:46:17.154499 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-c8fb945fd-4b7jn"
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-c8fb945fd-4b7jn" Jan 28 12:46:17 crc kubenswrapper[4685]: I0128 12:46:17.157210 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-4x25q" Jan 28 12:46:17 crc kubenswrapper[4685]: I0128 12:46:17.157662 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-service-cert" Jan 28 12:46:17 crc kubenswrapper[4685]: I0128 12:46:17.171697 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-c8fb945fd-4b7jn"] Jan 28 12:46:17 crc kubenswrapper[4685]: I0128 12:46:17.292652 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5nmf\" (UniqueName: \"kubernetes.io/projected/98ccfb23-1658-40c4-bb6a-30e64771d98a-kube-api-access-h5nmf\") pod \"swift-operator-controller-manager-c8fb945fd-4b7jn\" (UID: \"98ccfb23-1658-40c4-bb6a-30e64771d98a\") " pod="openstack-operators/swift-operator-controller-manager-c8fb945fd-4b7jn" Jan 28 12:46:17 crc kubenswrapper[4685]: I0128 12:46:17.292713 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/98ccfb23-1658-40c4-bb6a-30e64771d98a-apiservice-cert\") pod \"swift-operator-controller-manager-c8fb945fd-4b7jn\" (UID: \"98ccfb23-1658-40c4-bb6a-30e64771d98a\") " pod="openstack-operators/swift-operator-controller-manager-c8fb945fd-4b7jn" Jan 28 12:46:17 crc kubenswrapper[4685]: I0128 12:46:17.292783 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/98ccfb23-1658-40c4-bb6a-30e64771d98a-webhook-cert\") pod \"swift-operator-controller-manager-c8fb945fd-4b7jn\" (UID: \"98ccfb23-1658-40c4-bb6a-30e64771d98a\") " pod="openstack-operators/swift-operator-controller-manager-c8fb945fd-4b7jn" Jan 28 12:46:17 crc kubenswrapper[4685]: I0128 12:46:17.394587 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5nmf\" (UniqueName: \"kubernetes.io/projected/98ccfb23-1658-40c4-bb6a-30e64771d98a-kube-api-access-h5nmf\") pod \"swift-operator-controller-manager-c8fb945fd-4b7jn\" (UID: \"98ccfb23-1658-40c4-bb6a-30e64771d98a\") " pod="openstack-operators/swift-operator-controller-manager-c8fb945fd-4b7jn" Jan 28 12:46:17 crc kubenswrapper[4685]: I0128 12:46:17.394655 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/98ccfb23-1658-40c4-bb6a-30e64771d98a-apiservice-cert\") pod \"swift-operator-controller-manager-c8fb945fd-4b7jn\" (UID: \"98ccfb23-1658-40c4-bb6a-30e64771d98a\") " pod="openstack-operators/swift-operator-controller-manager-c8fb945fd-4b7jn" Jan 28 12:46:17 crc kubenswrapper[4685]: I0128 12:46:17.394691 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/98ccfb23-1658-40c4-bb6a-30e64771d98a-webhook-cert\") pod \"swift-operator-controller-manager-c8fb945fd-4b7jn\" (UID: \"98ccfb23-1658-40c4-bb6a-30e64771d98a\") " pod="openstack-operators/swift-operator-controller-manager-c8fb945fd-4b7jn" Jan 28 12:46:17 crc kubenswrapper[4685]: I0128 12:46:17.400517 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/98ccfb23-1658-40c4-bb6a-30e64771d98a-apiservice-cert\") pod \"swift-operator-controller-manager-c8fb945fd-4b7jn\" (UID: \"98ccfb23-1658-40c4-bb6a-30e64771d98a\") " pod="openstack-operators/swift-operator-controller-manager-c8fb945fd-4b7jn" Jan 28 12:46:17 crc kubenswrapper[4685]: I0128 12:46:17.400685 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/98ccfb23-1658-40c4-bb6a-30e64771d98a-webhook-cert\") pod \"swift-operator-controller-manager-c8fb945fd-4b7jn\" (UID: \"98ccfb23-1658-40c4-bb6a-30e64771d98a\") " pod="openstack-operators/swift-operator-controller-manager-c8fb945fd-4b7jn" Jan 28 12:46:17 crc kubenswrapper[4685]: I0128 12:46:17.409803 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5nmf\" (UniqueName: \"kubernetes.io/projected/98ccfb23-1658-40c4-bb6a-30e64771d98a-kube-api-access-h5nmf\") pod \"swift-operator-controller-manager-c8fb945fd-4b7jn\" (UID: \"98ccfb23-1658-40c4-bb6a-30e64771d98a\") " pod="openstack-operators/swift-operator-controller-manager-c8fb945fd-4b7jn" Jan 28 12:46:17 crc kubenswrapper[4685]: I0128 12:46:17.475355 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-c8fb945fd-4b7jn" Jan 28 12:46:17 crc kubenswrapper[4685]: I0128 12:46:17.866475 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-c8fb945fd-4b7jn"] Jan 28 12:46:18 crc kubenswrapper[4685]: I0128 12:46:18.019096 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-c8fb945fd-4b7jn" event={"ID":"98ccfb23-1658-40c4-bb6a-30e64771d98a","Type":"ContainerStarted","Data":"c10eb705f462a92b63d8ad94f9b032d0b20453bf6c4a0a957aad8e6a1e4c161e"} Jan 28 12:46:19 crc kubenswrapper[4685]: I0128 12:46:19.236287 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68f7444d68-zkgvn" Jan 28 12:46:21 crc kubenswrapper[4685]: I0128 12:46:21.684605 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/keystone-784968dcbc-drlnm" Jan 28 12:46:22 crc kubenswrapper[4685]: I0128 12:46:22.059561 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-c8fb945fd-4b7jn" event={"ID":"98ccfb23-1658-40c4-bb6a-30e64771d98a","Type":"ContainerStarted","Data":"d3b96e2118398f856f821d780c9a5128ca19a58fea66a3e40193b609433ab5be"} Jan 28 12:46:22 crc kubenswrapper[4685]: I0128 12:46:22.059783 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-c8fb945fd-4b7jn" Jan 28 12:46:22 crc kubenswrapper[4685]: I0128 12:46:22.083043 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-c8fb945fd-4b7jn" podStartSLOduration=1.7903461410000001 podStartE2EDuration="5.083022184s" podCreationTimestamp="2026-01-28 12:46:17 +0000 UTC" firstStartedPulling="2026-01-28 12:46:17.872902765 +0000 UTC m=+1528.960316600" lastFinishedPulling="2026-01-28 12:46:21.165578808 +0000 UTC m=+1532.252992643" observedRunningTime="2026-01-28 12:46:22.075203843 +0000 UTC m=+1533.162617688" watchObservedRunningTime="2026-01-28 12:46:22.083022184 +0000 UTC m=+1533.170436019" Jan 28 
12:46:27 crc kubenswrapper[4685]: I0128 12:46:27.069812 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:46:27 crc kubenswrapper[4685]: I0128 12:46:27.070892 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:46:27 crc kubenswrapper[4685]: I0128 12:46:27.480625 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-c8fb945fd-4b7jn" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.028389 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/swift-storage-0"] Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.033904 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.035733 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"swift-storage-config-data" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.036152 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"swift-conf" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.039758 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"swift-ring-files" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.040037 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"swift-swift-dockercfg-5dxbt" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.060334 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/swift-storage-0"] Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.108201 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-etc-swift\") pod \"swift-storage-0\" (UID: \"031a6182-897b-45d0-a48f-a9473aebe554\") " pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.108261 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-99z5p\" (UniqueName: \"kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-kube-api-access-99z5p\") pod \"swift-storage-0\" (UID: \"031a6182-897b-45d0-a48f-a9473aebe554\") " pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.108290 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/031a6182-897b-45d0-a48f-a9473aebe554-lock\") pod \"swift-storage-0\" (UID: \"031a6182-897b-45d0-a48f-a9473aebe554\") " pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.108309 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage03-crc\") pod \"swift-storage-0\" (UID: \"031a6182-897b-45d0-a48f-a9473aebe554\") " pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.108326 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/031a6182-897b-45d0-a48f-a9473aebe554-cache\") pod \"swift-storage-0\" (UID: \"031a6182-897b-45d0-a48f-a9473aebe554\") " pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.209451 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-etc-swift\") pod \"swift-storage-0\" (UID: \"031a6182-897b-45d0-a48f-a9473aebe554\") " pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.209511 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-99z5p\" (UniqueName: \"kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-kube-api-access-99z5p\") pod \"swift-storage-0\" (UID: \"031a6182-897b-45d0-a48f-a9473aebe554\") " pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.209538 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/031a6182-897b-45d0-a48f-a9473aebe554-lock\") pod \"swift-storage-0\" (UID: \"031a6182-897b-45d0-a48f-a9473aebe554\") " pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.209558 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"swift-storage-0\" (UID: \"031a6182-897b-45d0-a48f-a9473aebe554\") " pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.209577 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/031a6182-897b-45d0-a48f-a9473aebe554-cache\") pod \"swift-storage-0\" (UID: \"031a6182-897b-45d0-a48f-a9473aebe554\") " pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:33 crc kubenswrapper[4685]: E0128 12:46:33.209782 4685 projected.go:288] Couldn't get configMap glance-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 28 12:46:33 crc kubenswrapper[4685]: E0128 12:46:33.209850 4685 projected.go:194] Error preparing data for projected volume etc-swift for pod glance-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 28 12:46:33 crc kubenswrapper[4685]: E0128 12:46:33.209951 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-etc-swift podName:031a6182-897b-45d0-a48f-a9473aebe554 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:33.709924441 +0000 UTC m=+1544.797338266 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-etc-swift") pod "swift-storage-0" (UID: "031a6182-897b-45d0-a48f-a9473aebe554") : configmap "swift-ring-files" not found Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.210224 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"swift-storage-0\" (UID: \"031a6182-897b-45d0-a48f-a9473aebe554\") device mount path \"/mnt/openstack/pv03\"" pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.210230 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/031a6182-897b-45d0-a48f-a9473aebe554-cache\") pod \"swift-storage-0\" (UID: \"031a6182-897b-45d0-a48f-a9473aebe554\") " pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.210468 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/031a6182-897b-45d0-a48f-a9473aebe554-lock\") pod \"swift-storage-0\" (UID: \"031a6182-897b-45d0-a48f-a9473aebe554\") " pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.228380 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-99z5p\" (UniqueName: \"kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-kube-api-access-99z5p\") pod \"swift-storage-0\" (UID: \"031a6182-897b-45d0-a48f-a9473aebe554\") " pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.229405 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"swift-storage-0\" (UID: \"031a6182-897b-45d0-a48f-a9473aebe554\") " pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.533401 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/swift-ring-rebalance-dqwqf"] Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.534691 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.537193 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"swift-ring-config-data" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.537210 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"swift-proxy-config-data" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.539946 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"swift-ring-scripts" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.548819 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/swift-ring-rebalance-dqwqf"] Jan 28 12:46:33 crc kubenswrapper[4685]: E0128 12:46:33.556126 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[dispersionconf etc-swift kube-api-access-lmt5x ring-data-devices scripts swiftconf], unattached volumes=[], failed to process volumes=[dispersionconf etc-swift kube-api-access-lmt5x ring-data-devices scripts swiftconf]: context canceled" pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" podUID="40f0a059-5775-49c8-b5c3-5e686617b920" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.570692 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/swift-ring-rebalance-dqwqf"] Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.582790 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/swift-ring-rebalance-q4x4r"] Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.584208 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.609026 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/swift-ring-rebalance-q4x4r"] Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.615152 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1d674f3f-5708-4b77-85fa-7fcfea43d98d-scripts\") pod \"swift-ring-rebalance-q4x4r\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.615267 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmt5x\" (UniqueName: \"kubernetes.io/projected/40f0a059-5775-49c8-b5c3-5e686617b920-kube-api-access-lmt5x\") pod \"swift-ring-rebalance-dqwqf\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.615357 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5p8ms\" (UniqueName: \"kubernetes.io/projected/1d674f3f-5708-4b77-85fa-7fcfea43d98d-kube-api-access-5p8ms\") pod \"swift-ring-rebalance-q4x4r\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.615514 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/40f0a059-5775-49c8-b5c3-5e686617b920-dispersionconf\") pod \"swift-ring-rebalance-dqwqf\" (UID: 
\"40f0a059-5775-49c8-b5c3-5e686617b920\") " pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.615580 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/40f0a059-5775-49c8-b5c3-5e686617b920-swiftconf\") pod \"swift-ring-rebalance-dqwqf\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.615786 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/40f0a059-5775-49c8-b5c3-5e686617b920-etc-swift\") pod \"swift-ring-rebalance-dqwqf\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.615836 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/40f0a059-5775-49c8-b5c3-5e686617b920-ring-data-devices\") pod \"swift-ring-rebalance-dqwqf\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.615873 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/1d674f3f-5708-4b77-85fa-7fcfea43d98d-swiftconf\") pod \"swift-ring-rebalance-q4x4r\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.615919 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/1d674f3f-5708-4b77-85fa-7fcfea43d98d-etc-swift\") pod \"swift-ring-rebalance-q4x4r\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.615975 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/40f0a059-5775-49c8-b5c3-5e686617b920-scripts\") pod \"swift-ring-rebalance-dqwqf\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.616050 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/1d674f3f-5708-4b77-85fa-7fcfea43d98d-dispersionconf\") pod \"swift-ring-rebalance-q4x4r\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.616081 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/1d674f3f-5708-4b77-85fa-7fcfea43d98d-ring-data-devices\") pod \"swift-ring-rebalance-q4x4r\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.717230 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: 
\"kubernetes.io/empty-dir/40f0a059-5775-49c8-b5c3-5e686617b920-etc-swift\") pod \"swift-ring-rebalance-dqwqf\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.717327 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/40f0a059-5775-49c8-b5c3-5e686617b920-ring-data-devices\") pod \"swift-ring-rebalance-dqwqf\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.717358 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/1d674f3f-5708-4b77-85fa-7fcfea43d98d-swiftconf\") pod \"swift-ring-rebalance-q4x4r\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.717388 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/1d674f3f-5708-4b77-85fa-7fcfea43d98d-etc-swift\") pod \"swift-ring-rebalance-q4x4r\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.717420 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/40f0a059-5775-49c8-b5c3-5e686617b920-scripts\") pod \"swift-ring-rebalance-dqwqf\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.717466 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/1d674f3f-5708-4b77-85fa-7fcfea43d98d-dispersionconf\") pod \"swift-ring-rebalance-q4x4r\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.717489 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/1d674f3f-5708-4b77-85fa-7fcfea43d98d-ring-data-devices\") pod \"swift-ring-rebalance-q4x4r\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.717514 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmt5x\" (UniqueName: \"kubernetes.io/projected/40f0a059-5775-49c8-b5c3-5e686617b920-kube-api-access-lmt5x\") pod \"swift-ring-rebalance-dqwqf\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.717533 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1d674f3f-5708-4b77-85fa-7fcfea43d98d-scripts\") pod \"swift-ring-rebalance-q4x4r\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.717576 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5p8ms\" (UniqueName: 
\"kubernetes.io/projected/1d674f3f-5708-4b77-85fa-7fcfea43d98d-kube-api-access-5p8ms\") pod \"swift-ring-rebalance-q4x4r\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.717612 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/40f0a059-5775-49c8-b5c3-5e686617b920-dispersionconf\") pod \"swift-ring-rebalance-dqwqf\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.717639 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/40f0a059-5775-49c8-b5c3-5e686617b920-swiftconf\") pod \"swift-ring-rebalance-dqwqf\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.717671 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-etc-swift\") pod \"swift-storage-0\" (UID: \"031a6182-897b-45d0-a48f-a9473aebe554\") " pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:33 crc kubenswrapper[4685]: E0128 12:46:33.717821 4685 projected.go:288] Couldn't get configMap glance-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 28 12:46:33 crc kubenswrapper[4685]: E0128 12:46:33.717838 4685 projected.go:194] Error preparing data for projected volume etc-swift for pod glance-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 28 12:46:33 crc kubenswrapper[4685]: E0128 12:46:33.717887 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-etc-swift podName:031a6182-897b-45d0-a48f-a9473aebe554 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:34.717870651 +0000 UTC m=+1545.805284506 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-etc-swift") pod "swift-storage-0" (UID: "031a6182-897b-45d0-a48f-a9473aebe554") : configmap "swift-ring-files" not found Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.718826 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/1d674f3f-5708-4b77-85fa-7fcfea43d98d-ring-data-devices\") pod \"swift-ring-rebalance-q4x4r\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.719096 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1d674f3f-5708-4b77-85fa-7fcfea43d98d-scripts\") pod \"swift-ring-rebalance-q4x4r\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.719244 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/40f0a059-5775-49c8-b5c3-5e686617b920-etc-swift\") pod \"swift-ring-rebalance-dqwqf\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.720040 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/40f0a059-5775-49c8-b5c3-5e686617b920-ring-data-devices\") pod \"swift-ring-rebalance-dqwqf\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.720400 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/40f0a059-5775-49c8-b5c3-5e686617b920-scripts\") pod \"swift-ring-rebalance-dqwqf\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.720664 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/1d674f3f-5708-4b77-85fa-7fcfea43d98d-etc-swift\") pod \"swift-ring-rebalance-q4x4r\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.723428 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/40f0a059-5775-49c8-b5c3-5e686617b920-swiftconf\") pod \"swift-ring-rebalance-dqwqf\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.725548 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/1d674f3f-5708-4b77-85fa-7fcfea43d98d-dispersionconf\") pod \"swift-ring-rebalance-q4x4r\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.725774 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/40f0a059-5775-49c8-b5c3-5e686617b920-dispersionconf\") pod 
\"swift-ring-rebalance-dqwqf\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.726190 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/1d674f3f-5708-4b77-85fa-7fcfea43d98d-swiftconf\") pod \"swift-ring-rebalance-q4x4r\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.742245 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5p8ms\" (UniqueName: \"kubernetes.io/projected/1d674f3f-5708-4b77-85fa-7fcfea43d98d-kube-api-access-5p8ms\") pod \"swift-ring-rebalance-q4x4r\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.748981 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmt5x\" (UniqueName: \"kubernetes.io/projected/40f0a059-5775-49c8-b5c3-5e686617b920-kube-api-access-lmt5x\") pod \"swift-ring-rebalance-dqwqf\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:33 crc kubenswrapper[4685]: I0128 12:46:33.914712 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.137214 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.150900 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.192214 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-index-kv289"] Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.193219 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-index-kv289" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.199786 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-index-dockercfg-2l8nz" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.200674 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-index-kv289"] Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.222988 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmt5x\" (UniqueName: \"kubernetes.io/projected/40f0a059-5775-49c8-b5c3-5e686617b920-kube-api-access-lmt5x\") pod \"40f0a059-5775-49c8-b5c3-5e686617b920\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.223071 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/40f0a059-5775-49c8-b5c3-5e686617b920-scripts\") pod \"40f0a059-5775-49c8-b5c3-5e686617b920\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.223147 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/40f0a059-5775-49c8-b5c3-5e686617b920-ring-data-devices\") pod \"40f0a059-5775-49c8-b5c3-5e686617b920\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.223242 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/40f0a059-5775-49c8-b5c3-5e686617b920-dispersionconf\") pod \"40f0a059-5775-49c8-b5c3-5e686617b920\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.223791 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40f0a059-5775-49c8-b5c3-5e686617b920-scripts" (OuterVolumeSpecName: "scripts") pod "40f0a059-5775-49c8-b5c3-5e686617b920" (UID: "40f0a059-5775-49c8-b5c3-5e686617b920"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.223802 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40f0a059-5775-49c8-b5c3-5e686617b920-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "40f0a059-5775-49c8-b5c3-5e686617b920" (UID: "40f0a059-5775-49c8-b5c3-5e686617b920"). InnerVolumeSpecName "ring-data-devices". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.223889 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/40f0a059-5775-49c8-b5c3-5e686617b920-swiftconf\") pod \"40f0a059-5775-49c8-b5c3-5e686617b920\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.223929 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/40f0a059-5775-49c8-b5c3-5e686617b920-etc-swift\") pod \"40f0a059-5775-49c8-b5c3-5e686617b920\" (UID: \"40f0a059-5775-49c8-b5c3-5e686617b920\") " Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.224244 4685 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/40f0a059-5775-49c8-b5c3-5e686617b920-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.224258 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/40f0a059-5775-49c8-b5c3-5e686617b920-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.224502 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40f0a059-5775-49c8-b5c3-5e686617b920-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "40f0a059-5775-49c8-b5c3-5e686617b920" (UID: "40f0a059-5775-49c8-b5c3-5e686617b920"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.248207 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40f0a059-5775-49c8-b5c3-5e686617b920-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "40f0a059-5775-49c8-b5c3-5e686617b920" (UID: "40f0a059-5775-49c8-b5c3-5e686617b920"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.251424 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40f0a059-5775-49c8-b5c3-5e686617b920-kube-api-access-lmt5x" (OuterVolumeSpecName: "kube-api-access-lmt5x") pod "40f0a059-5775-49c8-b5c3-5e686617b920" (UID: "40f0a059-5775-49c8-b5c3-5e686617b920"). InnerVolumeSpecName "kube-api-access-lmt5x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.264318 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40f0a059-5775-49c8-b5c3-5e686617b920-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "40f0a059-5775-49c8-b5c3-5e686617b920" (UID: "40f0a059-5775-49c8-b5c3-5e686617b920"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.325471 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gchp6\" (UniqueName: \"kubernetes.io/projected/421794c1-13e8-4c5b-8515-2eca0ab070b5-kube-api-access-gchp6\") pod \"glance-operator-index-kv289\" (UID: \"421794c1-13e8-4c5b-8515-2eca0ab070b5\") " pod="openstack-operators/glance-operator-index-kv289" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.325619 4685 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/40f0a059-5775-49c8-b5c3-5e686617b920-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.325632 4685 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/40f0a059-5775-49c8-b5c3-5e686617b920-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.325641 4685 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/40f0a059-5775-49c8-b5c3-5e686617b920-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.325649 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmt5x\" (UniqueName: \"kubernetes.io/projected/40f0a059-5775-49c8-b5c3-5e686617b920-kube-api-access-lmt5x\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.419277 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/swift-ring-rebalance-q4x4r"] Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.426261 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gchp6\" (UniqueName: \"kubernetes.io/projected/421794c1-13e8-4c5b-8515-2eca0ab070b5-kube-api-access-gchp6\") pod \"glance-operator-index-kv289\" (UID: \"421794c1-13e8-4c5b-8515-2eca0ab070b5\") " pod="openstack-operators/glance-operator-index-kv289" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.447493 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gchp6\" (UniqueName: \"kubernetes.io/projected/421794c1-13e8-4c5b-8515-2eca0ab070b5-kube-api-access-gchp6\") pod \"glance-operator-index-kv289\" (UID: \"421794c1-13e8-4c5b-8515-2eca0ab070b5\") " pod="openstack-operators/glance-operator-index-kv289" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.519629 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-index-kv289" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.598708 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn"] Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.599827 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.614630 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn"] Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.731529 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4a145e58-4189-4128-981c-48608a766854-log-httpd\") pod \"swift-proxy-cb6f749b7-85dsn\" (UID: \"4a145e58-4189-4128-981c-48608a766854\") " pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.731593 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bqsp\" (UniqueName: \"kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-kube-api-access-6bqsp\") pod \"swift-proxy-cb6f749b7-85dsn\" (UID: \"4a145e58-4189-4128-981c-48608a766854\") " pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.731670 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-etc-swift\") pod \"swift-proxy-cb6f749b7-85dsn\" (UID: \"4a145e58-4189-4128-981c-48608a766854\") " pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.731714 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a145e58-4189-4128-981c-48608a766854-config-data\") pod \"swift-proxy-cb6f749b7-85dsn\" (UID: \"4a145e58-4189-4128-981c-48608a766854\") " pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.731752 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4a145e58-4189-4128-981c-48608a766854-run-httpd\") pod \"swift-proxy-cb6f749b7-85dsn\" (UID: \"4a145e58-4189-4128-981c-48608a766854\") " pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.731821 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-etc-swift\") pod \"swift-storage-0\" (UID: \"031a6182-897b-45d0-a48f-a9473aebe554\") " pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:34 crc kubenswrapper[4685]: E0128 12:46:34.732073 4685 projected.go:288] Couldn't get configMap glance-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 28 12:46:34 crc kubenswrapper[4685]: E0128 12:46:34.732110 4685 projected.go:194] Error preparing data for projected volume etc-swift for pod glance-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 28 12:46:34 crc kubenswrapper[4685]: E0128 12:46:34.732156 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-etc-swift podName:031a6182-897b-45d0-a48f-a9473aebe554 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:36.732140533 +0000 UTC m=+1547.819554368 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-etc-swift") pod "swift-storage-0" (UID: "031a6182-897b-45d0-a48f-a9473aebe554") : configmap "swift-ring-files" not found Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.833668 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a145e58-4189-4128-981c-48608a766854-config-data\") pod \"swift-proxy-cb6f749b7-85dsn\" (UID: \"4a145e58-4189-4128-981c-48608a766854\") " pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.833733 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4a145e58-4189-4128-981c-48608a766854-run-httpd\") pod \"swift-proxy-cb6f749b7-85dsn\" (UID: \"4a145e58-4189-4128-981c-48608a766854\") " pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.833788 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4a145e58-4189-4128-981c-48608a766854-log-httpd\") pod \"swift-proxy-cb6f749b7-85dsn\" (UID: \"4a145e58-4189-4128-981c-48608a766854\") " pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.833807 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bqsp\" (UniqueName: \"kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-kube-api-access-6bqsp\") pod \"swift-proxy-cb6f749b7-85dsn\" (UID: \"4a145e58-4189-4128-981c-48608a766854\") " pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.833860 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-etc-swift\") pod \"swift-proxy-cb6f749b7-85dsn\" (UID: \"4a145e58-4189-4128-981c-48608a766854\") " pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:34 crc kubenswrapper[4685]: E0128 12:46:34.834054 4685 projected.go:288] Couldn't get configMap glance-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 28 12:46:34 crc kubenswrapper[4685]: E0128 12:46:34.834074 4685 projected.go:194] Error preparing data for projected volume etc-swift for pod glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn: configmap "swift-ring-files" not found Jan 28 12:46:34 crc kubenswrapper[4685]: E0128 12:46:34.834115 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-etc-swift podName:4a145e58-4189-4128-981c-48608a766854 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:35.334099996 +0000 UTC m=+1546.421513821 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-etc-swift") pod "swift-proxy-cb6f749b7-85dsn" (UID: "4a145e58-4189-4128-981c-48608a766854") : configmap "swift-ring-files" not found Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.834720 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4a145e58-4189-4128-981c-48608a766854-run-httpd\") pod \"swift-proxy-cb6f749b7-85dsn\" (UID: \"4a145e58-4189-4128-981c-48608a766854\") " pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.834792 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4a145e58-4189-4128-981c-48608a766854-log-httpd\") pod \"swift-proxy-cb6f749b7-85dsn\" (UID: \"4a145e58-4189-4128-981c-48608a766854\") " pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.839483 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a145e58-4189-4128-981c-48608a766854-config-data\") pod \"swift-proxy-cb6f749b7-85dsn\" (UID: \"4a145e58-4189-4128-981c-48608a766854\") " pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.853115 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bqsp\" (UniqueName: \"kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-kube-api-access-6bqsp\") pod \"swift-proxy-cb6f749b7-85dsn\" (UID: \"4a145e58-4189-4128-981c-48608a766854\") " pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:34 crc kubenswrapper[4685]: I0128 12:46:34.983519 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-index-kv289"] Jan 28 12:46:34 crc kubenswrapper[4685]: W0128 12:46:34.988877 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod421794c1_13e8_4c5b_8515_2eca0ab070b5.slice/crio-6142a7e43b1eeb00cc5433d3373e79fd5acf176275334fc6c1a880038a15bba6 WatchSource:0}: Error finding container 6142a7e43b1eeb00cc5433d3373e79fd5acf176275334fc6c1a880038a15bba6: Status 404 returned error can't find the container with id 6142a7e43b1eeb00cc5433d3373e79fd5acf176275334fc6c1a880038a15bba6 Jan 28 12:46:35 crc kubenswrapper[4685]: I0128 12:46:35.147641 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" event={"ID":"1d674f3f-5708-4b77-85fa-7fcfea43d98d","Type":"ContainerStarted","Data":"f4d02f17a1f89aec44dca56ece5b85945fc149ee207fe072cf5a8b29b9cd5d3c"} Jan 28 12:46:35 crc kubenswrapper[4685]: I0128 12:46:35.153923 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-index-kv289" event={"ID":"421794c1-13e8-4c5b-8515-2eca0ab070b5","Type":"ContainerStarted","Data":"6142a7e43b1eeb00cc5433d3373e79fd5acf176275334fc6c1a880038a15bba6"} Jan 28 12:46:35 crc kubenswrapper[4685]: I0128 12:46:35.153985 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/swift-ring-rebalance-dqwqf" Jan 28 12:46:35 crc kubenswrapper[4685]: I0128 12:46:35.194772 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/swift-ring-rebalance-dqwqf"] Jan 28 12:46:35 crc kubenswrapper[4685]: I0128 12:46:35.207555 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/swift-ring-rebalance-dqwqf"] Jan 28 12:46:35 crc kubenswrapper[4685]: I0128 12:46:35.344022 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-etc-swift\") pod \"swift-proxy-cb6f749b7-85dsn\" (UID: \"4a145e58-4189-4128-981c-48608a766854\") " pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:35 crc kubenswrapper[4685]: E0128 12:46:35.344251 4685 projected.go:288] Couldn't get configMap glance-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 28 12:46:35 crc kubenswrapper[4685]: E0128 12:46:35.344269 4685 projected.go:194] Error preparing data for projected volume etc-swift for pod glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn: configmap "swift-ring-files" not found Jan 28 12:46:35 crc kubenswrapper[4685]: E0128 12:46:35.344327 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-etc-swift podName:4a145e58-4189-4128-981c-48608a766854 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:36.344306469 +0000 UTC m=+1547.431720304 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-etc-swift") pod "swift-proxy-cb6f749b7-85dsn" (UID: "4a145e58-4189-4128-981c-48608a766854") : configmap "swift-ring-files" not found Jan 28 12:46:36 crc kubenswrapper[4685]: I0128 12:46:36.367830 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-etc-swift\") pod \"swift-proxy-cb6f749b7-85dsn\" (UID: \"4a145e58-4189-4128-981c-48608a766854\") " pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:36 crc kubenswrapper[4685]: E0128 12:46:36.368226 4685 projected.go:288] Couldn't get configMap glance-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 28 12:46:36 crc kubenswrapper[4685]: E0128 12:46:36.368252 4685 projected.go:194] Error preparing data for projected volume etc-swift for pod glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn: configmap "swift-ring-files" not found Jan 28 12:46:36 crc kubenswrapper[4685]: E0128 12:46:36.368308 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-etc-swift podName:4a145e58-4189-4128-981c-48608a766854 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:38.368288218 +0000 UTC m=+1549.455702053 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-etc-swift") pod "swift-proxy-cb6f749b7-85dsn" (UID: "4a145e58-4189-4128-981c-48608a766854") : configmap "swift-ring-files" not found Jan 28 12:46:36 crc kubenswrapper[4685]: I0128 12:46:36.554091 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40f0a059-5775-49c8-b5c3-5e686617b920" path="/var/lib/kubelet/pods/40f0a059-5775-49c8-b5c3-5e686617b920/volumes" Jan 28 12:46:36 crc kubenswrapper[4685]: I0128 12:46:36.773819 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-etc-swift\") pod \"swift-storage-0\" (UID: \"031a6182-897b-45d0-a48f-a9473aebe554\") " pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:36 crc kubenswrapper[4685]: E0128 12:46:36.774022 4685 projected.go:288] Couldn't get configMap glance-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 28 12:46:36 crc kubenswrapper[4685]: E0128 12:46:36.774058 4685 projected.go:194] Error preparing data for projected volume etc-swift for pod glance-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 28 12:46:36 crc kubenswrapper[4685]: E0128 12:46:36.774136 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-etc-swift podName:031a6182-897b-45d0-a48f-a9473aebe554 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:40.77411742 +0000 UTC m=+1551.861531255 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-etc-swift") pod "swift-storage-0" (UID: "031a6182-897b-45d0-a48f-a9473aebe554") : configmap "swift-ring-files" not found Jan 28 12:46:38 crc kubenswrapper[4685]: I0128 12:46:38.400070 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-etc-swift\") pod \"swift-proxy-cb6f749b7-85dsn\" (UID: \"4a145e58-4189-4128-981c-48608a766854\") " pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:38 crc kubenswrapper[4685]: E0128 12:46:38.400860 4685 projected.go:288] Couldn't get configMap glance-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 28 12:46:38 crc kubenswrapper[4685]: E0128 12:46:38.400883 4685 projected.go:194] Error preparing data for projected volume etc-swift for pod glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn: configmap "swift-ring-files" not found Jan 28 12:46:38 crc kubenswrapper[4685]: E0128 12:46:38.400946 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-etc-swift podName:4a145e58-4189-4128-981c-48608a766854 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:42.400926 +0000 UTC m=+1553.488339835 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-etc-swift") pod "swift-proxy-cb6f749b7-85dsn" (UID: "4a145e58-4189-4128-981c-48608a766854") : configmap "swift-ring-files" not found Jan 28 12:46:40 crc kubenswrapper[4685]: I0128 12:46:40.844191 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-etc-swift\") pod \"swift-storage-0\" (UID: \"031a6182-897b-45d0-a48f-a9473aebe554\") " pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:40 crc kubenswrapper[4685]: E0128 12:46:40.844381 4685 projected.go:288] Couldn't get configMap glance-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 28 12:46:40 crc kubenswrapper[4685]: E0128 12:46:40.844404 4685 projected.go:194] Error preparing data for projected volume etc-swift for pod glance-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 28 12:46:40 crc kubenswrapper[4685]: E0128 12:46:40.844456 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-etc-swift podName:031a6182-897b-45d0-a48f-a9473aebe554 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:48.844441479 +0000 UTC m=+1559.931855314 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-etc-swift") pod "swift-storage-0" (UID: "031a6182-897b-45d0-a48f-a9473aebe554") : configmap "swift-ring-files" not found Jan 28 12:46:41 crc kubenswrapper[4685]: I0128 12:46:41.209900 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" event={"ID":"1d674f3f-5708-4b77-85fa-7fcfea43d98d","Type":"ContainerStarted","Data":"e21ee5a19c0d00ee9140f9a041f351d4dfd637decfabca672c6942025be59182"} Jan 28 12:46:41 crc kubenswrapper[4685]: I0128 12:46:41.212146 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-index-kv289" event={"ID":"421794c1-13e8-4c5b-8515-2eca0ab070b5","Type":"ContainerStarted","Data":"2a5a49117baf764d413932c8a9944dd3bfa759ccdfa12bebadee43b26a320624"} Jan 28 12:46:41 crc kubenswrapper[4685]: I0128 12:46:41.229191 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" podStartSLOduration=4.38327157 podStartE2EDuration="8.229153373s" podCreationTimestamp="2026-01-28 12:46:33 +0000 UTC" firstStartedPulling="2026-01-28 12:46:34.425022881 +0000 UTC m=+1545.512436716" lastFinishedPulling="2026-01-28 12:46:38.270904684 +0000 UTC m=+1549.358318519" observedRunningTime="2026-01-28 12:46:41.227319232 +0000 UTC m=+1552.314733087" watchObservedRunningTime="2026-01-28 12:46:41.229153373 +0000 UTC m=+1552.316567208" Jan 28 12:46:41 crc kubenswrapper[4685]: I0128 12:46:41.247075 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-index-kv289" podStartSLOduration=2.066459224 podStartE2EDuration="7.24705462s" podCreationTimestamp="2026-01-28 12:46:34 +0000 UTC" firstStartedPulling="2026-01-28 12:46:34.992068971 +0000 UTC m=+1546.079482796" lastFinishedPulling="2026-01-28 12:46:40.172664357 +0000 UTC m=+1551.260078192" observedRunningTime="2026-01-28 12:46:41.245280479 +0000 UTC m=+1552.332694314" watchObservedRunningTime="2026-01-28 12:46:41.24705462 +0000 UTC 
m=+1552.334468455" Jan 28 12:46:42 crc kubenswrapper[4685]: I0128 12:46:42.468224 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-etc-swift\") pod \"swift-proxy-cb6f749b7-85dsn\" (UID: \"4a145e58-4189-4128-981c-48608a766854\") " pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:42 crc kubenswrapper[4685]: E0128 12:46:42.468398 4685 projected.go:288] Couldn't get configMap glance-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 28 12:46:42 crc kubenswrapper[4685]: E0128 12:46:42.468571 4685 projected.go:194] Error preparing data for projected volume etc-swift for pod glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn: configmap "swift-ring-files" not found Jan 28 12:46:42 crc kubenswrapper[4685]: E0128 12:46:42.468625 4685 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-etc-swift podName:4a145e58-4189-4128-981c-48608a766854 nodeName:}" failed. No retries permitted until 2026-01-28 12:46:50.468610073 +0000 UTC m=+1561.556023908 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-etc-swift") pod "swift-proxy-cb6f749b7-85dsn" (UID: "4a145e58-4189-4128-981c-48608a766854") : configmap "swift-ring-files" not found Jan 28 12:46:44 crc kubenswrapper[4685]: I0128 12:46:44.520258 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-index-kv289" Jan 28 12:46:44 crc kubenswrapper[4685]: I0128 12:46:44.521304 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/glance-operator-index-kv289" Jan 28 12:46:44 crc kubenswrapper[4685]: I0128 12:46:44.568883 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/glance-operator-index-kv289" Jan 28 12:46:45 crc kubenswrapper[4685]: I0128 12:46:45.267718 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-index-kv289" Jan 28 12:46:47 crc kubenswrapper[4685]: I0128 12:46:47.253872 4685 generic.go:334] "Generic (PLEG): container finished" podID="1d674f3f-5708-4b77-85fa-7fcfea43d98d" containerID="e21ee5a19c0d00ee9140f9a041f351d4dfd637decfabca672c6942025be59182" exitCode=0 Jan 28 12:46:47 crc kubenswrapper[4685]: I0128 12:46:47.253937 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" event={"ID":"1d674f3f-5708-4b77-85fa-7fcfea43d98d","Type":"ContainerDied","Data":"e21ee5a19c0d00ee9140f9a041f351d4dfd637decfabca672c6942025be59182"} Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.509159 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.662369 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/1d674f3f-5708-4b77-85fa-7fcfea43d98d-ring-data-devices\") pod \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.662421 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1d674f3f-5708-4b77-85fa-7fcfea43d98d-scripts\") pod \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.662476 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5p8ms\" (UniqueName: \"kubernetes.io/projected/1d674f3f-5708-4b77-85fa-7fcfea43d98d-kube-api-access-5p8ms\") pod \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.662497 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/1d674f3f-5708-4b77-85fa-7fcfea43d98d-etc-swift\") pod \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.662575 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/1d674f3f-5708-4b77-85fa-7fcfea43d98d-swiftconf\") pod \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.662628 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/1d674f3f-5708-4b77-85fa-7fcfea43d98d-dispersionconf\") pod \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\" (UID: \"1d674f3f-5708-4b77-85fa-7fcfea43d98d\") " Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.663898 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d674f3f-5708-4b77-85fa-7fcfea43d98d-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "1d674f3f-5708-4b77-85fa-7fcfea43d98d" (UID: "1d674f3f-5708-4b77-85fa-7fcfea43d98d"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.664251 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d674f3f-5708-4b77-85fa-7fcfea43d98d-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "1d674f3f-5708-4b77-85fa-7fcfea43d98d" (UID: "1d674f3f-5708-4b77-85fa-7fcfea43d98d"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.668948 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d674f3f-5708-4b77-85fa-7fcfea43d98d-kube-api-access-5p8ms" (OuterVolumeSpecName: "kube-api-access-5p8ms") pod "1d674f3f-5708-4b77-85fa-7fcfea43d98d" (UID: "1d674f3f-5708-4b77-85fa-7fcfea43d98d"). InnerVolumeSpecName "kube-api-access-5p8ms". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.671106 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d674f3f-5708-4b77-85fa-7fcfea43d98d-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "1d674f3f-5708-4b77-85fa-7fcfea43d98d" (UID: "1d674f3f-5708-4b77-85fa-7fcfea43d98d"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.680347 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d674f3f-5708-4b77-85fa-7fcfea43d98d-scripts" (OuterVolumeSpecName: "scripts") pod "1d674f3f-5708-4b77-85fa-7fcfea43d98d" (UID: "1d674f3f-5708-4b77-85fa-7fcfea43d98d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.684939 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d674f3f-5708-4b77-85fa-7fcfea43d98d-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "1d674f3f-5708-4b77-85fa-7fcfea43d98d" (UID: "1d674f3f-5708-4b77-85fa-7fcfea43d98d"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.764590 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5p8ms\" (UniqueName: \"kubernetes.io/projected/1d674f3f-5708-4b77-85fa-7fcfea43d98d-kube-api-access-5p8ms\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.764856 4685 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/1d674f3f-5708-4b77-85fa-7fcfea43d98d-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.764922 4685 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/1d674f3f-5708-4b77-85fa-7fcfea43d98d-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.765024 4685 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/1d674f3f-5708-4b77-85fa-7fcfea43d98d-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.765090 4685 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/1d674f3f-5708-4b77-85fa-7fcfea43d98d-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.765153 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1d674f3f-5708-4b77-85fa-7fcfea43d98d-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.867025 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-etc-swift\") pod \"swift-storage-0\" (UID: \"031a6182-897b-45d0-a48f-a9473aebe554\") " pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.873440 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/031a6182-897b-45d0-a48f-a9473aebe554-etc-swift\") pod \"swift-storage-0\" (UID: 
\"031a6182-897b-45d0-a48f-a9473aebe554\") " pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:48 crc kubenswrapper[4685]: I0128 12:46:48.949158 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/swift-storage-0" Jan 28 12:46:49 crc kubenswrapper[4685]: I0128 12:46:49.269943 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" event={"ID":"1d674f3f-5708-4b77-85fa-7fcfea43d98d","Type":"ContainerDied","Data":"f4d02f17a1f89aec44dca56ece5b85945fc149ee207fe072cf5a8b29b9cd5d3c"} Jan 28 12:46:49 crc kubenswrapper[4685]: I0128 12:46:49.270284 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f4d02f17a1f89aec44dca56ece5b85945fc149ee207fe072cf5a8b29b9cd5d3c" Jan 28 12:46:49 crc kubenswrapper[4685]: I0128 12:46:49.270014 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/swift-ring-rebalance-q4x4r" Jan 28 12:46:49 crc kubenswrapper[4685]: I0128 12:46:49.352416 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/swift-storage-0"] Jan 28 12:46:50 crc kubenswrapper[4685]: I0128 12:46:50.281162 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"031a6182-897b-45d0-a48f-a9473aebe554","Type":"ContainerStarted","Data":"96f9235761067593bf9a2d23ef73b0c5cf69d8b79153a3005b5a51b871711105"} Jan 28 12:46:50 crc kubenswrapper[4685]: I0128 12:46:50.492884 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-etc-swift\") pod \"swift-proxy-cb6f749b7-85dsn\" (UID: \"4a145e58-4189-4128-981c-48608a766854\") " pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:50 crc kubenswrapper[4685]: I0128 12:46:50.500139 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4a145e58-4189-4128-981c-48608a766854-etc-swift\") pod \"swift-proxy-cb6f749b7-85dsn\" (UID: \"4a145e58-4189-4128-981c-48608a766854\") " pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:50 crc kubenswrapper[4685]: I0128 12:46:50.528938 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:51 crc kubenswrapper[4685]: I0128 12:46:51.289311 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"031a6182-897b-45d0-a48f-a9473aebe554","Type":"ContainerStarted","Data":"ec274307ef0ede11ff8e695dc2ec21dece49bc05d028d2558ac66454bba042b2"} Jan 28 12:46:51 crc kubenswrapper[4685]: I0128 12:46:51.338715 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn"] Jan 28 12:46:51 crc kubenswrapper[4685]: W0128 12:46:51.345221 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4a145e58_4189_4128_981c_48608a766854.slice/crio-b79a472032d8624d60b3a0991692f4c58f5d84729b5edd6454f9b39074b549d9 WatchSource:0}: Error finding container b79a472032d8624d60b3a0991692f4c58f5d84729b5edd6454f9b39074b549d9: Status 404 returned error can't find the container with id b79a472032d8624d60b3a0991692f4c58f5d84729b5edd6454f9b39074b549d9 Jan 28 12:46:52 crc kubenswrapper[4685]: I0128 12:46:52.305153 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"031a6182-897b-45d0-a48f-a9473aebe554","Type":"ContainerStarted","Data":"772432630c80642c29272fc2f41f54849b66d1daf5537718e39d3fd3c1e49e17"} Jan 28 12:46:52 crc kubenswrapper[4685]: I0128 12:46:52.305558 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"031a6182-897b-45d0-a48f-a9473aebe554","Type":"ContainerStarted","Data":"c69e3c213e14a0e011284c9a8c622d379eec04fe037a10e848e3c93112d156ee"} Jan 28 12:46:52 crc kubenswrapper[4685]: I0128 12:46:52.305577 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"031a6182-897b-45d0-a48f-a9473aebe554","Type":"ContainerStarted","Data":"6c4b4dbcc9d16bcafd4cf63c01474517de6b17e2c3073a5fba6942ced8901fae"} Jan 28 12:46:52 crc kubenswrapper[4685]: I0128 12:46:52.306861 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" event={"ID":"4a145e58-4189-4128-981c-48608a766854","Type":"ContainerStarted","Data":"6a761a13279733061369c6d3f954b2556eb258a5129b04aa2ada657820cb7769"} Jan 28 12:46:52 crc kubenswrapper[4685]: I0128 12:46:52.306900 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" event={"ID":"4a145e58-4189-4128-981c-48608a766854","Type":"ContainerStarted","Data":"d95e5c53f08eae6409081d91f68910f3cf4338bbb67efd113c22967ad9e171b2"} Jan 28 12:46:52 crc kubenswrapper[4685]: I0128 12:46:52.306910 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" event={"ID":"4a145e58-4189-4128-981c-48608a766854","Type":"ContainerStarted","Data":"b79a472032d8624d60b3a0991692f4c58f5d84729b5edd6454f9b39074b549d9"} Jan 28 12:46:52 crc kubenswrapper[4685]: I0128 12:46:52.307042 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:52 crc kubenswrapper[4685]: I0128 12:46:52.307203 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:46:52 crc kubenswrapper[4685]: I0128 12:46:52.328807 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" podStartSLOduration=18.328787269 podStartE2EDuration="18.328787269s" podCreationTimestamp="2026-01-28 12:46:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:46:52.32597778 +0000 UTC m=+1563.413391635" watchObservedRunningTime="2026-01-28 12:46:52.328787269 +0000 UTC m=+1563.416201104" Jan 28 12:46:53 crc kubenswrapper[4685]: I0128 12:46:53.427953 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm"] Jan 28 12:46:53 crc kubenswrapper[4685]: E0128 12:46:53.428325 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d674f3f-5708-4b77-85fa-7fcfea43d98d" containerName="swift-ring-rebalance" Jan 28 12:46:53 crc kubenswrapper[4685]: I0128 12:46:53.428342 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d674f3f-5708-4b77-85fa-7fcfea43d98d" containerName="swift-ring-rebalance" Jan 28 12:46:53 crc kubenswrapper[4685]: I0128 12:46:53.428494 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d674f3f-5708-4b77-85fa-7fcfea43d98d" containerName="swift-ring-rebalance" Jan 28 12:46:53 crc kubenswrapper[4685]: I0128 12:46:53.429619 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm" Jan 28 12:46:53 crc kubenswrapper[4685]: I0128 12:46:53.433151 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-dvgnb" Jan 28 12:46:53 crc kubenswrapper[4685]: I0128 12:46:53.435211 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm"] Jan 28 12:46:53 crc kubenswrapper[4685]: I0128 12:46:53.543947 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/31079862-762b-46dd-93f6-c627cca53447-util\") pod \"737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm\" (UID: \"31079862-762b-46dd-93f6-c627cca53447\") " pod="openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm" Jan 28 12:46:53 crc kubenswrapper[4685]: I0128 12:46:53.544286 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nklh7\" (UniqueName: \"kubernetes.io/projected/31079862-762b-46dd-93f6-c627cca53447-kube-api-access-nklh7\") pod \"737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm\" (UID: \"31079862-762b-46dd-93f6-c627cca53447\") " pod="openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm" Jan 28 12:46:53 crc kubenswrapper[4685]: I0128 12:46:53.544341 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/31079862-762b-46dd-93f6-c627cca53447-bundle\") pod \"737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm\" (UID: \"31079862-762b-46dd-93f6-c627cca53447\") " pod="openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm" Jan 28 12:46:53 crc kubenswrapper[4685]: I0128 12:46:53.645726 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/31079862-762b-46dd-93f6-c627cca53447-util\") pod \"737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm\" (UID: \"31079862-762b-46dd-93f6-c627cca53447\") " pod="openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm" Jan 28 12:46:53 crc kubenswrapper[4685]: I0128 12:46:53.645820 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nklh7\" (UniqueName: \"kubernetes.io/projected/31079862-762b-46dd-93f6-c627cca53447-kube-api-access-nklh7\") pod \"737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm\" (UID: \"31079862-762b-46dd-93f6-c627cca53447\") " pod="openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm" Jan 28 12:46:53 crc kubenswrapper[4685]: I0128 12:46:53.645869 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/31079862-762b-46dd-93f6-c627cca53447-bundle\") pod \"737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm\" (UID: \"31079862-762b-46dd-93f6-c627cca53447\") " pod="openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm" Jan 28 12:46:53 crc kubenswrapper[4685]: I0128 12:46:53.646368 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/31079862-762b-46dd-93f6-c627cca53447-bundle\") pod \"737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm\" (UID: \"31079862-762b-46dd-93f6-c627cca53447\") " pod="openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm" Jan 28 12:46:53 crc kubenswrapper[4685]: I0128 12:46:53.646505 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/31079862-762b-46dd-93f6-c627cca53447-util\") pod \"737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm\" (UID: \"31079862-762b-46dd-93f6-c627cca53447\") " pod="openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm" Jan 28 12:46:53 crc kubenswrapper[4685]: I0128 12:46:53.671345 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nklh7\" (UniqueName: \"kubernetes.io/projected/31079862-762b-46dd-93f6-c627cca53447-kube-api-access-nklh7\") pod \"737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm\" (UID: \"31079862-762b-46dd-93f6-c627cca53447\") " pod="openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm" Jan 28 12:46:53 crc kubenswrapper[4685]: I0128 12:46:53.752791 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm" Jan 28 12:46:54 crc kubenswrapper[4685]: I0128 12:46:54.164709 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm"] Jan 28 12:46:54 crc kubenswrapper[4685]: W0128 12:46:54.164987 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod31079862_762b_46dd_93f6_c627cca53447.slice/crio-9bad9efe10c8e4916dafa9f00f94a5f90f6cbb7b6be3358008edc3f31228e8c2 WatchSource:0}: Error finding container 9bad9efe10c8e4916dafa9f00f94a5f90f6cbb7b6be3358008edc3f31228e8c2: Status 404 returned error can't find the container with id 9bad9efe10c8e4916dafa9f00f94a5f90f6cbb7b6be3358008edc3f31228e8c2 Jan 28 12:46:54 crc kubenswrapper[4685]: I0128 12:46:54.322084 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm" event={"ID":"31079862-762b-46dd-93f6-c627cca53447","Type":"ContainerStarted","Data":"711e8aa2ff6285f2bc9dcdba2688df2b1e71589ff7024a96b8514b1fcf89ab1d"} Jan 28 12:46:54 crc kubenswrapper[4685]: I0128 12:46:54.322129 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm" event={"ID":"31079862-762b-46dd-93f6-c627cca53447","Type":"ContainerStarted","Data":"9bad9efe10c8e4916dafa9f00f94a5f90f6cbb7b6be3358008edc3f31228e8c2"} Jan 28 12:46:54 crc kubenswrapper[4685]: I0128 12:46:54.327386 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"031a6182-897b-45d0-a48f-a9473aebe554","Type":"ContainerStarted","Data":"b1256f489f3f72f031fc67cad7454b477e407150be988b6ffd1d3dc8db7126c7"} Jan 28 12:46:54 crc kubenswrapper[4685]: I0128 12:46:54.327423 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"031a6182-897b-45d0-a48f-a9473aebe554","Type":"ContainerStarted","Data":"fd6655dda59613f5a0cc69a5ad518af48c91c5b1f8d428bccbf29ddf788ef179"} Jan 28 12:46:54 crc kubenswrapper[4685]: I0128 12:46:54.327432 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"031a6182-897b-45d0-a48f-a9473aebe554","Type":"ContainerStarted","Data":"68579db61dbb232995a5f6d123d0d4721f92d437bbc52d08f5a4a6b1c7d1761b"} Jan 28 12:46:54 crc kubenswrapper[4685]: I0128 12:46:54.327441 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"031a6182-897b-45d0-a48f-a9473aebe554","Type":"ContainerStarted","Data":"9db859521f566047ef7f9c4a56dae0ac13652ed13f5c1eb710398f95504ae465"} Jan 28 12:46:55 crc kubenswrapper[4685]: I0128 12:46:55.338205 4685 generic.go:334] "Generic (PLEG): container finished" podID="31079862-762b-46dd-93f6-c627cca53447" containerID="711e8aa2ff6285f2bc9dcdba2688df2b1e71589ff7024a96b8514b1fcf89ab1d" exitCode=0 Jan 28 12:46:55 crc kubenswrapper[4685]: I0128 12:46:55.338280 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm" event={"ID":"31079862-762b-46dd-93f6-c627cca53447","Type":"ContainerDied","Data":"711e8aa2ff6285f2bc9dcdba2688df2b1e71589ff7024a96b8514b1fcf89ab1d"} Jan 28 12:46:56 crc kubenswrapper[4685]: I0128 12:46:56.353042 4685 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"031a6182-897b-45d0-a48f-a9473aebe554","Type":"ContainerStarted","Data":"28eda0641e1bb429011914b21959de0fa7279a01c032a97ac5cbd2027715ed7a"} Jan 28 12:46:56 crc kubenswrapper[4685]: I0128 12:46:56.353606 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"031a6182-897b-45d0-a48f-a9473aebe554","Type":"ContainerStarted","Data":"32d4fcbdaa425beda5689632c04b9c565c935313c1e967e3453d45f9e852eea6"} Jan 28 12:46:56 crc kubenswrapper[4685]: I0128 12:46:56.353618 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"031a6182-897b-45d0-a48f-a9473aebe554","Type":"ContainerStarted","Data":"4648300110afc8e10b9a493f98ccdef84d547241f27b1476c383e84323f08e3b"} Jan 28 12:46:56 crc kubenswrapper[4685]: I0128 12:46:56.356268 4685 generic.go:334] "Generic (PLEG): container finished" podID="31079862-762b-46dd-93f6-c627cca53447" containerID="4dce4f1d2cc8ce5d5870899cda5082b045f062af96bec59095a32d2bb7fe032b" exitCode=0 Jan 28 12:46:56 crc kubenswrapper[4685]: I0128 12:46:56.356315 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm" event={"ID":"31079862-762b-46dd-93f6-c627cca53447","Type":"ContainerDied","Data":"4dce4f1d2cc8ce5d5870899cda5082b045f062af96bec59095a32d2bb7fe032b"} Jan 28 12:46:57 crc kubenswrapper[4685]: I0128 12:46:57.070101 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:46:57 crc kubenswrapper[4685]: I0128 12:46:57.070463 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:46:57 crc kubenswrapper[4685]: I0128 12:46:57.368604 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"031a6182-897b-45d0-a48f-a9473aebe554","Type":"ContainerStarted","Data":"ac919deb236ad929fbb5d3277ad43ee3da6056cff1a66ddaa7e69ab6a03d87b3"} Jan 28 12:46:57 crc kubenswrapper[4685]: I0128 12:46:57.368649 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"031a6182-897b-45d0-a48f-a9473aebe554","Type":"ContainerStarted","Data":"f162e5044665c1f741f6228e43cbbf4734ab51c2dc2f01e8219610d857e28fb6"} Jan 28 12:46:57 crc kubenswrapper[4685]: I0128 12:46:57.368659 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"031a6182-897b-45d0-a48f-a9473aebe554","Type":"ContainerStarted","Data":"9f9bcbe23d8e96cff9c3c24d64d9951eba0bbbc7b15e8009d878caf3a5d27668"} Jan 28 12:46:57 crc kubenswrapper[4685]: I0128 12:46:57.368668 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/swift-storage-0" event={"ID":"031a6182-897b-45d0-a48f-a9473aebe554","Type":"ContainerStarted","Data":"c991857c9e498f7fdb1674f9ace2193075a7c338826bb72f4cc41aadf14928ea"} Jan 28 12:46:57 crc kubenswrapper[4685]: I0128 12:46:57.370516 4685 generic.go:334] "Generic (PLEG): container finished" 
podID="31079862-762b-46dd-93f6-c627cca53447" containerID="f597f06a223cfd92f9bd4412309df23e9812457e434a2b8e2a8d9ea1d795c8ac" exitCode=0 Jan 28 12:46:57 crc kubenswrapper[4685]: I0128 12:46:57.370557 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm" event={"ID":"31079862-762b-46dd-93f6-c627cca53447","Type":"ContainerDied","Data":"f597f06a223cfd92f9bd4412309df23e9812457e434a2b8e2a8d9ea1d795c8ac"} Jan 28 12:46:57 crc kubenswrapper[4685]: I0128 12:46:57.442897 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/swift-storage-0" podStartSLOduration=20.19823278 podStartE2EDuration="26.442879055s" podCreationTimestamp="2026-01-28 12:46:31 +0000 UTC" firstStartedPulling="2026-01-28 12:46:49.356292708 +0000 UTC m=+1560.443706543" lastFinishedPulling="2026-01-28 12:46:55.600938983 +0000 UTC m=+1566.688352818" observedRunningTime="2026-01-28 12:46:57.439822699 +0000 UTC m=+1568.527236534" watchObservedRunningTime="2026-01-28 12:46:57.442879055 +0000 UTC m=+1568.530292890" Jan 28 12:46:58 crc kubenswrapper[4685]: I0128 12:46:58.733926 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm" Jan 28 12:46:58 crc kubenswrapper[4685]: I0128 12:46:58.826912 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/31079862-762b-46dd-93f6-c627cca53447-bundle\") pod \"31079862-762b-46dd-93f6-c627cca53447\" (UID: \"31079862-762b-46dd-93f6-c627cca53447\") " Jan 28 12:46:58 crc kubenswrapper[4685]: I0128 12:46:58.827036 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nklh7\" (UniqueName: \"kubernetes.io/projected/31079862-762b-46dd-93f6-c627cca53447-kube-api-access-nklh7\") pod \"31079862-762b-46dd-93f6-c627cca53447\" (UID: \"31079862-762b-46dd-93f6-c627cca53447\") " Jan 28 12:46:58 crc kubenswrapper[4685]: I0128 12:46:58.827216 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/31079862-762b-46dd-93f6-c627cca53447-util\") pod \"31079862-762b-46dd-93f6-c627cca53447\" (UID: \"31079862-762b-46dd-93f6-c627cca53447\") " Jan 28 12:46:58 crc kubenswrapper[4685]: I0128 12:46:58.828036 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31079862-762b-46dd-93f6-c627cca53447-bundle" (OuterVolumeSpecName: "bundle") pod "31079862-762b-46dd-93f6-c627cca53447" (UID: "31079862-762b-46dd-93f6-c627cca53447"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:46:58 crc kubenswrapper[4685]: I0128 12:46:58.835624 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31079862-762b-46dd-93f6-c627cca53447-kube-api-access-nklh7" (OuterVolumeSpecName: "kube-api-access-nklh7") pod "31079862-762b-46dd-93f6-c627cca53447" (UID: "31079862-762b-46dd-93f6-c627cca53447"). InnerVolumeSpecName "kube-api-access-nklh7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:46:58 crc kubenswrapper[4685]: I0128 12:46:58.844579 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31079862-762b-46dd-93f6-c627cca53447-util" (OuterVolumeSpecName: "util") pod "31079862-762b-46dd-93f6-c627cca53447" (UID: "31079862-762b-46dd-93f6-c627cca53447"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:46:58 crc kubenswrapper[4685]: I0128 12:46:58.929774 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nklh7\" (UniqueName: \"kubernetes.io/projected/31079862-762b-46dd-93f6-c627cca53447-kube-api-access-nklh7\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:58 crc kubenswrapper[4685]: I0128 12:46:58.929818 4685 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/31079862-762b-46dd-93f6-c627cca53447-util\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:58 crc kubenswrapper[4685]: I0128 12:46:58.929828 4685 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/31079862-762b-46dd-93f6-c627cca53447-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:46:59 crc kubenswrapper[4685]: I0128 12:46:59.388481 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm" event={"ID":"31079862-762b-46dd-93f6-c627cca53447","Type":"ContainerDied","Data":"9bad9efe10c8e4916dafa9f00f94a5f90f6cbb7b6be3358008edc3f31228e8c2"} Jan 28 12:46:59 crc kubenswrapper[4685]: I0128 12:46:59.388531 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9bad9efe10c8e4916dafa9f00f94a5f90f6cbb7b6be3358008edc3f31228e8c2" Jan 28 12:46:59 crc kubenswrapper[4685]: I0128 12:46:59.388562 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm" Jan 28 12:47:00 crc kubenswrapper[4685]: I0128 12:47:00.532160 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:47:00 crc kubenswrapper[4685]: I0128 12:47:00.533317 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/swift-proxy-cb6f749b7-85dsn" Jan 28 12:47:13 crc kubenswrapper[4685]: I0128 12:47:13.510404 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-d97d5c6-w7s6g"] Jan 28 12:47:13 crc kubenswrapper[4685]: E0128 12:47:13.511194 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31079862-762b-46dd-93f6-c627cca53447" containerName="pull" Jan 28 12:47:13 crc kubenswrapper[4685]: I0128 12:47:13.511207 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="31079862-762b-46dd-93f6-c627cca53447" containerName="pull" Jan 28 12:47:13 crc kubenswrapper[4685]: E0128 12:47:13.511220 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31079862-762b-46dd-93f6-c627cca53447" containerName="extract" Jan 28 12:47:13 crc kubenswrapper[4685]: I0128 12:47:13.511225 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="31079862-762b-46dd-93f6-c627cca53447" containerName="extract" Jan 28 12:47:13 crc kubenswrapper[4685]: E0128 12:47:13.511235 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31079862-762b-46dd-93f6-c627cca53447" containerName="util" Jan 28 12:47:13 crc kubenswrapper[4685]: I0128 12:47:13.511241 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="31079862-762b-46dd-93f6-c627cca53447" containerName="util" Jan 28 12:47:13 crc kubenswrapper[4685]: I0128 12:47:13.511410 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="31079862-762b-46dd-93f6-c627cca53447" containerName="extract" Jan 28 12:47:13 crc kubenswrapper[4685]: I0128 12:47:13.511929 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-d97d5c6-w7s6g" Jan 28 12:47:13 crc kubenswrapper[4685]: I0128 12:47:13.513707 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-4rknc" Jan 28 12:47:13 crc kubenswrapper[4685]: I0128 12:47:13.513834 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-service-cert" Jan 28 12:47:13 crc kubenswrapper[4685]: I0128 12:47:13.524007 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-d97d5c6-w7s6g"] Jan 28 12:47:13 crc kubenswrapper[4685]: I0128 12:47:13.635093 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/085fc160-da8c-4422-962f-e14e737e5a42-apiservice-cert\") pod \"glance-operator-controller-manager-d97d5c6-w7s6g\" (UID: \"085fc160-da8c-4422-962f-e14e737e5a42\") " pod="openstack-operators/glance-operator-controller-manager-d97d5c6-w7s6g" Jan 28 12:47:13 crc kubenswrapper[4685]: I0128 12:47:13.635541 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sd5pk\" (UniqueName: \"kubernetes.io/projected/085fc160-da8c-4422-962f-e14e737e5a42-kube-api-access-sd5pk\") pod \"glance-operator-controller-manager-d97d5c6-w7s6g\" (UID: \"085fc160-da8c-4422-962f-e14e737e5a42\") " pod="openstack-operators/glance-operator-controller-manager-d97d5c6-w7s6g" Jan 28 12:47:13 crc kubenswrapper[4685]: I0128 12:47:13.636031 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/085fc160-da8c-4422-962f-e14e737e5a42-webhook-cert\") pod \"glance-operator-controller-manager-d97d5c6-w7s6g\" (UID: \"085fc160-da8c-4422-962f-e14e737e5a42\") " pod="openstack-operators/glance-operator-controller-manager-d97d5c6-w7s6g" Jan 28 12:47:13 crc kubenswrapper[4685]: I0128 12:47:13.737701 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/085fc160-da8c-4422-962f-e14e737e5a42-webhook-cert\") pod \"glance-operator-controller-manager-d97d5c6-w7s6g\" (UID: \"085fc160-da8c-4422-962f-e14e737e5a42\") " pod="openstack-operators/glance-operator-controller-manager-d97d5c6-w7s6g" Jan 28 12:47:13 crc kubenswrapper[4685]: I0128 12:47:13.737751 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/085fc160-da8c-4422-962f-e14e737e5a42-apiservice-cert\") pod \"glance-operator-controller-manager-d97d5c6-w7s6g\" (UID: \"085fc160-da8c-4422-962f-e14e737e5a42\") " pod="openstack-operators/glance-operator-controller-manager-d97d5c6-w7s6g" Jan 28 12:47:13 crc kubenswrapper[4685]: I0128 12:47:13.737802 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sd5pk\" (UniqueName: \"kubernetes.io/projected/085fc160-da8c-4422-962f-e14e737e5a42-kube-api-access-sd5pk\") pod \"glance-operator-controller-manager-d97d5c6-w7s6g\" (UID: \"085fc160-da8c-4422-962f-e14e737e5a42\") " pod="openstack-operators/glance-operator-controller-manager-d97d5c6-w7s6g" Jan 28 12:47:13 crc kubenswrapper[4685]: I0128 12:47:13.746782 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"webhook-cert\" (UniqueName: \"kubernetes.io/secret/085fc160-da8c-4422-962f-e14e737e5a42-webhook-cert\") pod \"glance-operator-controller-manager-d97d5c6-w7s6g\" (UID: \"085fc160-da8c-4422-962f-e14e737e5a42\") " pod="openstack-operators/glance-operator-controller-manager-d97d5c6-w7s6g" Jan 28 12:47:13 crc kubenswrapper[4685]: I0128 12:47:13.750822 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/085fc160-da8c-4422-962f-e14e737e5a42-apiservice-cert\") pod \"glance-operator-controller-manager-d97d5c6-w7s6g\" (UID: \"085fc160-da8c-4422-962f-e14e737e5a42\") " pod="openstack-operators/glance-operator-controller-manager-d97d5c6-w7s6g" Jan 28 12:47:13 crc kubenswrapper[4685]: I0128 12:47:13.759839 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sd5pk\" (UniqueName: \"kubernetes.io/projected/085fc160-da8c-4422-962f-e14e737e5a42-kube-api-access-sd5pk\") pod \"glance-operator-controller-manager-d97d5c6-w7s6g\" (UID: \"085fc160-da8c-4422-962f-e14e737e5a42\") " pod="openstack-operators/glance-operator-controller-manager-d97d5c6-w7s6g" Jan 28 12:47:13 crc kubenswrapper[4685]: I0128 12:47:13.834528 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-d97d5c6-w7s6g" Jan 28 12:47:14 crc kubenswrapper[4685]: I0128 12:47:14.265460 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-d97d5c6-w7s6g"] Jan 28 12:47:14 crc kubenswrapper[4685]: W0128 12:47:14.269671 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod085fc160_da8c_4422_962f_e14e737e5a42.slice/crio-cb3c0fbb1ea6c3ea90bfb2b101f05fae0e68d0ed3c247bd7137799182b31e748 WatchSource:0}: Error finding container cb3c0fbb1ea6c3ea90bfb2b101f05fae0e68d0ed3c247bd7137799182b31e748: Status 404 returned error can't find the container with id cb3c0fbb1ea6c3ea90bfb2b101f05fae0e68d0ed3c247bd7137799182b31e748 Jan 28 12:47:14 crc kubenswrapper[4685]: I0128 12:47:14.499115 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-d97d5c6-w7s6g" event={"ID":"085fc160-da8c-4422-962f-e14e737e5a42","Type":"ContainerStarted","Data":"cb3c0fbb1ea6c3ea90bfb2b101f05fae0e68d0ed3c247bd7137799182b31e748"} Jan 28 12:47:16 crc kubenswrapper[4685]: I0128 12:47:16.515121 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-d97d5c6-w7s6g" event={"ID":"085fc160-da8c-4422-962f-e14e737e5a42","Type":"ContainerStarted","Data":"5e763600579ea596f295413f5f8d75277d5ef0efd79c608addc80fc9ce122ce6"} Jan 28 12:47:16 crc kubenswrapper[4685]: I0128 12:47:16.515431 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-d97d5c6-w7s6g" Jan 28 12:47:16 crc kubenswrapper[4685]: I0128 12:47:16.538829 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-d97d5c6-w7s6g" podStartSLOduration=2.22838604 podStartE2EDuration="3.538806667s" podCreationTimestamp="2026-01-28 12:47:13 +0000 UTC" firstStartedPulling="2026-01-28 12:47:14.272720742 +0000 UTC m=+1585.360134577" lastFinishedPulling="2026-01-28 12:47:15.583141359 +0000 UTC m=+1586.670555204" observedRunningTime="2026-01-28 12:47:16.533373093 +0000 
UTC m=+1587.620786928" watchObservedRunningTime="2026-01-28 12:47:16.538806667 +0000 UTC m=+1587.626220512" Jan 28 12:47:23 crc kubenswrapper[4685]: I0128 12:47:23.840463 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-d97d5c6-w7s6g" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.070054 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.070421 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.070471 4685 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.071195 4685 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65"} pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.071258 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" containerID="cri-o://8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" gracePeriod=600 Jan 28 12:47:27 crc kubenswrapper[4685]: E0128 12:47:27.191477 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.593208 4685 generic.go:334] "Generic (PLEG): container finished" podID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" exitCode=0 Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.593259 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerDied","Data":"8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65"} Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.593301 4685 scope.go:117] "RemoveContainer" containerID="fe6797eb1526f7a98dbf830c75b37ec963fdbf0115e31ebdafc20e877843581c" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.593795 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 
12:47:27 crc kubenswrapper[4685]: E0128 12:47:27.594026 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.673876 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-db-create-bd7fb"] Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.674778 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-bd7fb" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.686840 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-create-bd7fb"] Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.696773 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-cf6a-account-create-update-hl5sf"] Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.697651 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-cf6a-account-create-update-hl5sf" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.705820 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-db-secret" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.707777 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/openstackclient"] Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.708807 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/openstackclient" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.710104 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"openstack-config" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.710145 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"openstack-config-secret" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.710314 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"default-dockercfg-xqmkf" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.711063 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"glance-kuttl-tests"/"openstack-scripts-9db6gc427h" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.715654 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-cf6a-account-create-update-hl5sf"] Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.721728 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/openstackclient"] Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.734988 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntkxj\" (UniqueName: \"kubernetes.io/projected/06252eec-03e7-48a6-ba7b-567b233e3ee1-kube-api-access-ntkxj\") pod \"glance-db-create-bd7fb\" (UID: \"06252eec-03e7-48a6-ba7b-567b233e3ee1\") " pod="glance-kuttl-tests/glance-db-create-bd7fb" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.735199 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/06252eec-03e7-48a6-ba7b-567b233e3ee1-operator-scripts\") pod \"glance-db-create-bd7fb\" (UID: \"06252eec-03e7-48a6-ba7b-567b233e3ee1\") " pod="glance-kuttl-tests/glance-db-create-bd7fb" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.837102 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/06252eec-03e7-48a6-ba7b-567b233e3ee1-operator-scripts\") pod \"glance-db-create-bd7fb\" (UID: \"06252eec-03e7-48a6-ba7b-567b233e3ee1\") " pod="glance-kuttl-tests/glance-db-create-bd7fb" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.837158 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7-openstack-config-secret\") pod \"openstackclient\" (UID: \"2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7\") " pod="glance-kuttl-tests/openstackclient" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.837248 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/09ae1181-ad73-4a92-8676-df118044b818-operator-scripts\") pod \"glance-cf6a-account-create-update-hl5sf\" (UID: \"09ae1181-ad73-4a92-8676-df118044b818\") " pod="glance-kuttl-tests/glance-cf6a-account-create-update-hl5sf" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.837282 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zfjc\" (UniqueName: \"kubernetes.io/projected/09ae1181-ad73-4a92-8676-df118044b818-kube-api-access-7zfjc\") pod \"glance-cf6a-account-create-update-hl5sf\" (UID: 
\"09ae1181-ad73-4a92-8676-df118044b818\") " pod="glance-kuttl-tests/glance-cf6a-account-create-update-hl5sf" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.837312 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntkxj\" (UniqueName: \"kubernetes.io/projected/06252eec-03e7-48a6-ba7b-567b233e3ee1-kube-api-access-ntkxj\") pod \"glance-db-create-bd7fb\" (UID: \"06252eec-03e7-48a6-ba7b-567b233e3ee1\") " pod="glance-kuttl-tests/glance-db-create-bd7fb" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.837348 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7-openstack-config\") pod \"openstackclient\" (UID: \"2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7\") " pod="glance-kuttl-tests/openstackclient" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.837541 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-scripts\" (UniqueName: \"kubernetes.io/configmap/2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7-openstack-scripts\") pod \"openstackclient\" (UID: \"2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7\") " pod="glance-kuttl-tests/openstackclient" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.837618 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4f9b4\" (UniqueName: \"kubernetes.io/projected/2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7-kube-api-access-4f9b4\") pod \"openstackclient\" (UID: \"2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7\") " pod="glance-kuttl-tests/openstackclient" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.838466 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/06252eec-03e7-48a6-ba7b-567b233e3ee1-operator-scripts\") pod \"glance-db-create-bd7fb\" (UID: \"06252eec-03e7-48a6-ba7b-567b233e3ee1\") " pod="glance-kuttl-tests/glance-db-create-bd7fb" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.866148 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntkxj\" (UniqueName: \"kubernetes.io/projected/06252eec-03e7-48a6-ba7b-567b233e3ee1-kube-api-access-ntkxj\") pod \"glance-db-create-bd7fb\" (UID: \"06252eec-03e7-48a6-ba7b-567b233e3ee1\") " pod="glance-kuttl-tests/glance-db-create-bd7fb" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.940428 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7-openstack-config-secret\") pod \"openstackclient\" (UID: \"2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7\") " pod="glance-kuttl-tests/openstackclient" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.940555 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/09ae1181-ad73-4a92-8676-df118044b818-operator-scripts\") pod \"glance-cf6a-account-create-update-hl5sf\" (UID: \"09ae1181-ad73-4a92-8676-df118044b818\") " pod="glance-kuttl-tests/glance-cf6a-account-create-update-hl5sf" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.940589 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zfjc\" (UniqueName: 
\"kubernetes.io/projected/09ae1181-ad73-4a92-8676-df118044b818-kube-api-access-7zfjc\") pod \"glance-cf6a-account-create-update-hl5sf\" (UID: \"09ae1181-ad73-4a92-8676-df118044b818\") " pod="glance-kuttl-tests/glance-cf6a-account-create-update-hl5sf" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.940631 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7-openstack-config\") pod \"openstackclient\" (UID: \"2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7\") " pod="glance-kuttl-tests/openstackclient" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.940663 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-scripts\" (UniqueName: \"kubernetes.io/configmap/2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7-openstack-scripts\") pod \"openstackclient\" (UID: \"2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7\") " pod="glance-kuttl-tests/openstackclient" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.940688 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4f9b4\" (UniqueName: \"kubernetes.io/projected/2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7-kube-api-access-4f9b4\") pod \"openstackclient\" (UID: \"2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7\") " pod="glance-kuttl-tests/openstackclient" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.941435 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/09ae1181-ad73-4a92-8676-df118044b818-operator-scripts\") pod \"glance-cf6a-account-create-update-hl5sf\" (UID: \"09ae1181-ad73-4a92-8676-df118044b818\") " pod="glance-kuttl-tests/glance-cf6a-account-create-update-hl5sf" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.941901 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7-openstack-config\") pod \"openstackclient\" (UID: \"2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7\") " pod="glance-kuttl-tests/openstackclient" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.941978 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-scripts\" (UniqueName: \"kubernetes.io/configmap/2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7-openstack-scripts\") pod \"openstackclient\" (UID: \"2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7\") " pod="glance-kuttl-tests/openstackclient" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.944088 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7-openstack-config-secret\") pod \"openstackclient\" (UID: \"2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7\") " pod="glance-kuttl-tests/openstackclient" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.957576 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4f9b4\" (UniqueName: \"kubernetes.io/projected/2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7-kube-api-access-4f9b4\") pod \"openstackclient\" (UID: \"2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7\") " pod="glance-kuttl-tests/openstackclient" Jan 28 12:47:27 crc kubenswrapper[4685]: I0128 12:47:27.963365 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zfjc\" (UniqueName: 
\"kubernetes.io/projected/09ae1181-ad73-4a92-8676-df118044b818-kube-api-access-7zfjc\") pod \"glance-cf6a-account-create-update-hl5sf\" (UID: \"09ae1181-ad73-4a92-8676-df118044b818\") " pod="glance-kuttl-tests/glance-cf6a-account-create-update-hl5sf" Jan 28 12:47:28 crc kubenswrapper[4685]: I0128 12:47:28.015330 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-bd7fb" Jan 28 12:47:28 crc kubenswrapper[4685]: I0128 12:47:28.029659 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-cf6a-account-create-update-hl5sf" Jan 28 12:47:28 crc kubenswrapper[4685]: I0128 12:47:28.036027 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/openstackclient" Jan 28 12:47:28 crc kubenswrapper[4685]: I0128 12:47:28.500754 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-create-bd7fb"] Jan 28 12:47:28 crc kubenswrapper[4685]: I0128 12:47:28.557851 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-cf6a-account-create-update-hl5sf"] Jan 28 12:47:28 crc kubenswrapper[4685]: W0128 12:47:28.562976 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09ae1181_ad73_4a92_8676_df118044b818.slice/crio-0e753f25257e805b8013a2ca2e0f7f36f1c16ad307d740f89b29ecc03327137c WatchSource:0}: Error finding container 0e753f25257e805b8013a2ca2e0f7f36f1c16ad307d740f89b29ecc03327137c: Status 404 returned error can't find the container with id 0e753f25257e805b8013a2ca2e0f7f36f1c16ad307d740f89b29ecc03327137c Jan 28 12:47:28 crc kubenswrapper[4685]: I0128 12:47:28.564511 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/openstackclient"] Jan 28 12:47:28 crc kubenswrapper[4685]: W0128 12:47:28.565509 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a47bf9a_a943_4bcc_8a19_be4f8db1cfa7.slice/crio-b6f14400615acb7184608bc5702239285e9f6c2e1f825917582ea3ed5f2b31ff WatchSource:0}: Error finding container b6f14400615acb7184608bc5702239285e9f6c2e1f825917582ea3ed5f2b31ff: Status 404 returned error can't find the container with id b6f14400615acb7184608bc5702239285e9f6c2e1f825917582ea3ed5f2b31ff Jan 28 12:47:28 crc kubenswrapper[4685]: I0128 12:47:28.600519 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstackclient" event={"ID":"2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7","Type":"ContainerStarted","Data":"b6f14400615acb7184608bc5702239285e9f6c2e1f825917582ea3ed5f2b31ff"} Jan 28 12:47:28 crc kubenswrapper[4685]: I0128 12:47:28.601575 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-bd7fb" event={"ID":"06252eec-03e7-48a6-ba7b-567b233e3ee1","Type":"ContainerStarted","Data":"1c2eb24da5fc19f6b0981f57a3187ba10314f312449cab2562b9e3d1c0ac41e5"} Jan 28 12:47:28 crc kubenswrapper[4685]: I0128 12:47:28.605000 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-cf6a-account-create-update-hl5sf" event={"ID":"09ae1181-ad73-4a92-8676-df118044b818","Type":"ContainerStarted","Data":"0e753f25257e805b8013a2ca2e0f7f36f1c16ad307d740f89b29ecc03327137c"} Jan 28 12:47:29 crc kubenswrapper[4685]: I0128 12:47:29.617502 4685 generic.go:334] "Generic (PLEG): container finished" podID="06252eec-03e7-48a6-ba7b-567b233e3ee1" 
containerID="80963df0789d33cfb44207aa62db4d581166e71f6a8c435372b377cfcf6ab186" exitCode=0 Jan 28 12:47:29 crc kubenswrapper[4685]: I0128 12:47:29.617570 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-bd7fb" event={"ID":"06252eec-03e7-48a6-ba7b-567b233e3ee1","Type":"ContainerDied","Data":"80963df0789d33cfb44207aa62db4d581166e71f6a8c435372b377cfcf6ab186"} Jan 28 12:47:29 crc kubenswrapper[4685]: I0128 12:47:29.621137 4685 generic.go:334] "Generic (PLEG): container finished" podID="09ae1181-ad73-4a92-8676-df118044b818" containerID="2669ef2c9e27ff4eeaabca032f749a054eaf41467eca1db998dc86e70b5665a5" exitCode=0 Jan 28 12:47:29 crc kubenswrapper[4685]: I0128 12:47:29.621222 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-cf6a-account-create-update-hl5sf" event={"ID":"09ae1181-ad73-4a92-8676-df118044b818","Type":"ContainerDied","Data":"2669ef2c9e27ff4eeaabca032f749a054eaf41467eca1db998dc86e70b5665a5"} Jan 28 12:47:30 crc kubenswrapper[4685]: I0128 12:47:30.996984 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-cf6a-account-create-update-hl5sf" Jan 28 12:47:31 crc kubenswrapper[4685]: I0128 12:47:31.003481 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-bd7fb" Jan 28 12:47:31 crc kubenswrapper[4685]: I0128 12:47:31.085408 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zfjc\" (UniqueName: \"kubernetes.io/projected/09ae1181-ad73-4a92-8676-df118044b818-kube-api-access-7zfjc\") pod \"09ae1181-ad73-4a92-8676-df118044b818\" (UID: \"09ae1181-ad73-4a92-8676-df118044b818\") " Jan 28 12:47:31 crc kubenswrapper[4685]: I0128 12:47:31.085474 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/06252eec-03e7-48a6-ba7b-567b233e3ee1-operator-scripts\") pod \"06252eec-03e7-48a6-ba7b-567b233e3ee1\" (UID: \"06252eec-03e7-48a6-ba7b-567b233e3ee1\") " Jan 28 12:47:31 crc kubenswrapper[4685]: I0128 12:47:31.085528 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/09ae1181-ad73-4a92-8676-df118044b818-operator-scripts\") pod \"09ae1181-ad73-4a92-8676-df118044b818\" (UID: \"09ae1181-ad73-4a92-8676-df118044b818\") " Jan 28 12:47:31 crc kubenswrapper[4685]: I0128 12:47:31.085645 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ntkxj\" (UniqueName: \"kubernetes.io/projected/06252eec-03e7-48a6-ba7b-567b233e3ee1-kube-api-access-ntkxj\") pod \"06252eec-03e7-48a6-ba7b-567b233e3ee1\" (UID: \"06252eec-03e7-48a6-ba7b-567b233e3ee1\") " Jan 28 12:47:31 crc kubenswrapper[4685]: I0128 12:47:31.086498 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06252eec-03e7-48a6-ba7b-567b233e3ee1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "06252eec-03e7-48a6-ba7b-567b233e3ee1" (UID: "06252eec-03e7-48a6-ba7b-567b233e3ee1"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:47:31 crc kubenswrapper[4685]: I0128 12:47:31.086923 4685 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/06252eec-03e7-48a6-ba7b-567b233e3ee1-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:47:31 crc kubenswrapper[4685]: I0128 12:47:31.087115 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae1181-ad73-4a92-8676-df118044b818-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "09ae1181-ad73-4a92-8676-df118044b818" (UID: "09ae1181-ad73-4a92-8676-df118044b818"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:47:31 crc kubenswrapper[4685]: I0128 12:47:31.092493 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae1181-ad73-4a92-8676-df118044b818-kube-api-access-7zfjc" (OuterVolumeSpecName: "kube-api-access-7zfjc") pod "09ae1181-ad73-4a92-8676-df118044b818" (UID: "09ae1181-ad73-4a92-8676-df118044b818"). InnerVolumeSpecName "kube-api-access-7zfjc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:47:31 crc kubenswrapper[4685]: I0128 12:47:31.095490 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06252eec-03e7-48a6-ba7b-567b233e3ee1-kube-api-access-ntkxj" (OuterVolumeSpecName: "kube-api-access-ntkxj") pod "06252eec-03e7-48a6-ba7b-567b233e3ee1" (UID: "06252eec-03e7-48a6-ba7b-567b233e3ee1"). InnerVolumeSpecName "kube-api-access-ntkxj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:47:31 crc kubenswrapper[4685]: I0128 12:47:31.189298 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ntkxj\" (UniqueName: \"kubernetes.io/projected/06252eec-03e7-48a6-ba7b-567b233e3ee1-kube-api-access-ntkxj\") on node \"crc\" DevicePath \"\"" Jan 28 12:47:31 crc kubenswrapper[4685]: I0128 12:47:31.189345 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zfjc\" (UniqueName: \"kubernetes.io/projected/09ae1181-ad73-4a92-8676-df118044b818-kube-api-access-7zfjc\") on node \"crc\" DevicePath \"\"" Jan 28 12:47:31 crc kubenswrapper[4685]: I0128 12:47:31.189359 4685 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/09ae1181-ad73-4a92-8676-df118044b818-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:47:31 crc kubenswrapper[4685]: I0128 12:47:31.636794 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-bd7fb" event={"ID":"06252eec-03e7-48a6-ba7b-567b233e3ee1","Type":"ContainerDied","Data":"1c2eb24da5fc19f6b0981f57a3187ba10314f312449cab2562b9e3d1c0ac41e5"} Jan 28 12:47:31 crc kubenswrapper[4685]: I0128 12:47:31.636855 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1c2eb24da5fc19f6b0981f57a3187ba10314f312449cab2562b9e3d1c0ac41e5" Jan 28 12:47:31 crc kubenswrapper[4685]: I0128 12:47:31.636870 4685 util.go:48] "No ready sandbox for pod can be found. 
Jan 28 12:47:31 crc kubenswrapper[4685]: I0128 12:47:31.636870 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-bd7fb" Jan 28 12:47:31 crc kubenswrapper[4685]: I0128 12:47:31.638194 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-cf6a-account-create-update-hl5sf" event={"ID":"09ae1181-ad73-4a92-8676-df118044b818","Type":"ContainerDied","Data":"0e753f25257e805b8013a2ca2e0f7f36f1c16ad307d740f89b29ecc03327137c"} Jan 28 12:47:31 crc kubenswrapper[4685]: I0128 12:47:31.638235 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0e753f25257e805b8013a2ca2e0f7f36f1c16ad307d740f89b29ecc03327137c" Jan 28 12:47:31 crc kubenswrapper[4685]: I0128 12:47:31.638334 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-cf6a-account-create-update-hl5sf" Jan 28 12:47:32 crc kubenswrapper[4685]: I0128 12:47:32.817207 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-db-sync-bvrmc"] Jan 28 12:47:32 crc kubenswrapper[4685]: E0128 12:47:32.817753 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06252eec-03e7-48a6-ba7b-567b233e3ee1" containerName="mariadb-database-create" Jan 28 12:47:32 crc kubenswrapper[4685]: I0128 12:47:32.817768 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="06252eec-03e7-48a6-ba7b-567b233e3ee1" containerName="mariadb-database-create" Jan 28 12:47:32 crc kubenswrapper[4685]: E0128 12:47:32.817804 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09ae1181-ad73-4a92-8676-df118044b818" containerName="mariadb-account-create-update" Jan 28 12:47:32 crc kubenswrapper[4685]: I0128 12:47:32.817812 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="09ae1181-ad73-4a92-8676-df118044b818" containerName="mariadb-account-create-update" Jan 28 12:47:32 crc kubenswrapper[4685]: I0128 12:47:32.817964 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="09ae1181-ad73-4a92-8676-df118044b818" containerName="mariadb-account-create-update" Jan 28 12:47:32 crc kubenswrapper[4685]: I0128 12:47:32.817988 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="06252eec-03e7-48a6-ba7b-567b233e3ee1" containerName="mariadb-database-create" Jan 28 12:47:32 crc kubenswrapper[4685]: I0128 12:47:32.818552 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-bvrmc" Jan 28 12:47:32 crc kubenswrapper[4685]: I0128 12:47:32.821027 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-sync-bvrmc"] Jan 28 12:47:32 crc kubenswrapper[4685]: I0128 12:47:32.828426 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-config-data" Jan 28 12:47:32 crc kubenswrapper[4685]: I0128 12:47:32.828531 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-glance-dockercfg-684br" Jan 28 12:47:32 crc kubenswrapper[4685]: I0128 12:47:32.918792 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53646360-80c2-4065-a782-f3172609cb2a-config-data\") pod \"glance-db-sync-bvrmc\" (UID: \"53646360-80c2-4065-a782-f3172609cb2a\") " pod="glance-kuttl-tests/glance-db-sync-bvrmc" Jan 28 12:47:32 crc kubenswrapper[4685]: I0128 12:47:32.918894 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/53646360-80c2-4065-a782-f3172609cb2a-db-sync-config-data\") pod \"glance-db-sync-bvrmc\" (UID: \"53646360-80c2-4065-a782-f3172609cb2a\") " pod="glance-kuttl-tests/glance-db-sync-bvrmc" Jan 28 12:47:32 crc kubenswrapper[4685]: I0128 12:47:32.918961 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvr4n\" (UniqueName: \"kubernetes.io/projected/53646360-80c2-4065-a782-f3172609cb2a-kube-api-access-rvr4n\") pod \"glance-db-sync-bvrmc\" (UID: \"53646360-80c2-4065-a782-f3172609cb2a\") " pod="glance-kuttl-tests/glance-db-sync-bvrmc" Jan 28 12:47:33 crc kubenswrapper[4685]: I0128 12:47:33.020269 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53646360-80c2-4065-a782-f3172609cb2a-config-data\") pod \"glance-db-sync-bvrmc\" (UID: \"53646360-80c2-4065-a782-f3172609cb2a\") " pod="glance-kuttl-tests/glance-db-sync-bvrmc" Jan 28 12:47:33 crc kubenswrapper[4685]: I0128 12:47:33.020363 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/53646360-80c2-4065-a782-f3172609cb2a-db-sync-config-data\") pod \"glance-db-sync-bvrmc\" (UID: \"53646360-80c2-4065-a782-f3172609cb2a\") " pod="glance-kuttl-tests/glance-db-sync-bvrmc" Jan 28 12:47:33 crc kubenswrapper[4685]: I0128 12:47:33.020433 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvr4n\" (UniqueName: \"kubernetes.io/projected/53646360-80c2-4065-a782-f3172609cb2a-kube-api-access-rvr4n\") pod \"glance-db-sync-bvrmc\" (UID: \"53646360-80c2-4065-a782-f3172609cb2a\") " pod="glance-kuttl-tests/glance-db-sync-bvrmc" Jan 28 12:47:33 crc kubenswrapper[4685]: I0128 12:47:33.037015 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/53646360-80c2-4065-a782-f3172609cb2a-db-sync-config-data\") pod \"glance-db-sync-bvrmc\" (UID: \"53646360-80c2-4065-a782-f3172609cb2a\") " pod="glance-kuttl-tests/glance-db-sync-bvrmc" Jan 28 12:47:33 crc kubenswrapper[4685]: I0128 12:47:33.037059 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvr4n\" (UniqueName: 
\"kubernetes.io/projected/53646360-80c2-4065-a782-f3172609cb2a-kube-api-access-rvr4n\") pod \"glance-db-sync-bvrmc\" (UID: \"53646360-80c2-4065-a782-f3172609cb2a\") " pod="glance-kuttl-tests/glance-db-sync-bvrmc" Jan 28 12:47:33 crc kubenswrapper[4685]: I0128 12:47:33.040741 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53646360-80c2-4065-a782-f3172609cb2a-config-data\") pod \"glance-db-sync-bvrmc\" (UID: \"53646360-80c2-4065-a782-f3172609cb2a\") " pod="glance-kuttl-tests/glance-db-sync-bvrmc" Jan 28 12:47:33 crc kubenswrapper[4685]: I0128 12:47:33.138926 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-bvrmc" Jan 28 12:47:36 crc kubenswrapper[4685]: I0128 12:47:36.250754 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-sync-bvrmc"] Jan 28 12:47:36 crc kubenswrapper[4685]: W0128 12:47:36.263595 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod53646360_80c2_4065_a782_f3172609cb2a.slice/crio-358ace4929523664b1435c421d6355fd467f0c3fc2fdbf65a8a17f7946e17f41 WatchSource:0}: Error finding container 358ace4929523664b1435c421d6355fd467f0c3fc2fdbf65a8a17f7946e17f41: Status 404 returned error can't find the container with id 358ace4929523664b1435c421d6355fd467f0c3fc2fdbf65a8a17f7946e17f41 Jan 28 12:47:36 crc kubenswrapper[4685]: I0128 12:47:36.684523 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-bvrmc" event={"ID":"53646360-80c2-4065-a782-f3172609cb2a","Type":"ContainerStarted","Data":"358ace4929523664b1435c421d6355fd467f0c3fc2fdbf65a8a17f7946e17f41"} Jan 28 12:47:36 crc kubenswrapper[4685]: I0128 12:47:36.687565 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/openstackclient" event={"ID":"2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7","Type":"ContainerStarted","Data":"437e2624972311c9cd370d4af31229120e6ec797d1054cd5207b1325d9f15ec0"} Jan 28 12:47:36 crc kubenswrapper[4685]: I0128 12:47:36.704517 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/openstackclient" podStartSLOduration=2.333181393 podStartE2EDuration="9.704498699s" podCreationTimestamp="2026-01-28 12:47:27 +0000 UTC" firstStartedPulling="2026-01-28 12:47:28.568242988 +0000 UTC m=+1599.655656823" lastFinishedPulling="2026-01-28 12:47:35.939560294 +0000 UTC m=+1607.026974129" observedRunningTime="2026-01-28 12:47:36.700753473 +0000 UTC m=+1607.788167308" watchObservedRunningTime="2026-01-28 12:47:36.704498699 +0000 UTC m=+1607.791912544" Jan 28 12:47:39 crc kubenswrapper[4685]: I0128 12:47:39.546376 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:47:39 crc kubenswrapper[4685]: E0128 12:47:39.547102 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:47:46 crc kubenswrapper[4685]: I0128 12:47:46.757920 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-bvrmc" 
event={"ID":"53646360-80c2-4065-a782-f3172609cb2a","Type":"ContainerStarted","Data":"3235284ae6f2d9977599382e9098d374ce32da2197b31d89beed0a2208f3108b"} Jan 28 12:47:46 crc kubenswrapper[4685]: I0128 12:47:46.780918 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-db-sync-bvrmc" podStartSLOduration=4.743676897 podStartE2EDuration="14.780896818s" podCreationTimestamp="2026-01-28 12:47:32 +0000 UTC" firstStartedPulling="2026-01-28 12:47:36.265706514 +0000 UTC m=+1607.353120349" lastFinishedPulling="2026-01-28 12:47:46.302926445 +0000 UTC m=+1617.390340270" observedRunningTime="2026-01-28 12:47:46.773311523 +0000 UTC m=+1617.860725378" watchObservedRunningTime="2026-01-28 12:47:46.780896818 +0000 UTC m=+1617.868310653" Jan 28 12:47:51 crc kubenswrapper[4685]: I0128 12:47:51.546330 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:47:51 crc kubenswrapper[4685]: E0128 12:47:51.546996 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:47:58 crc kubenswrapper[4685]: I0128 12:47:58.845698 4685 generic.go:334] "Generic (PLEG): container finished" podID="53646360-80c2-4065-a782-f3172609cb2a" containerID="3235284ae6f2d9977599382e9098d374ce32da2197b31d89beed0a2208f3108b" exitCode=0 Jan 28 12:47:58 crc kubenswrapper[4685]: I0128 12:47:58.845816 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-bvrmc" event={"ID":"53646360-80c2-4065-a782-f3172609cb2a","Type":"ContainerDied","Data":"3235284ae6f2d9977599382e9098d374ce32da2197b31d89beed0a2208f3108b"} Jan 28 12:48:00 crc kubenswrapper[4685]: I0128 12:48:00.135000 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-bvrmc" Jan 28 12:48:00 crc kubenswrapper[4685]: I0128 12:48:00.215762 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/53646360-80c2-4065-a782-f3172609cb2a-db-sync-config-data\") pod \"53646360-80c2-4065-a782-f3172609cb2a\" (UID: \"53646360-80c2-4065-a782-f3172609cb2a\") " Jan 28 12:48:00 crc kubenswrapper[4685]: I0128 12:48:00.215930 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rvr4n\" (UniqueName: \"kubernetes.io/projected/53646360-80c2-4065-a782-f3172609cb2a-kube-api-access-rvr4n\") pod \"53646360-80c2-4065-a782-f3172609cb2a\" (UID: \"53646360-80c2-4065-a782-f3172609cb2a\") " Jan 28 12:48:00 crc kubenswrapper[4685]: I0128 12:48:00.216023 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53646360-80c2-4065-a782-f3172609cb2a-config-data\") pod \"53646360-80c2-4065-a782-f3172609cb2a\" (UID: \"53646360-80c2-4065-a782-f3172609cb2a\") " Jan 28 12:48:00 crc kubenswrapper[4685]: I0128 12:48:00.220645 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53646360-80c2-4065-a782-f3172609cb2a-kube-api-access-rvr4n" (OuterVolumeSpecName: "kube-api-access-rvr4n") pod "53646360-80c2-4065-a782-f3172609cb2a" (UID: "53646360-80c2-4065-a782-f3172609cb2a"). InnerVolumeSpecName "kube-api-access-rvr4n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:48:00 crc kubenswrapper[4685]: I0128 12:48:00.221199 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53646360-80c2-4065-a782-f3172609cb2a-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "53646360-80c2-4065-a782-f3172609cb2a" (UID: "53646360-80c2-4065-a782-f3172609cb2a"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:48:00 crc kubenswrapper[4685]: I0128 12:48:00.252315 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53646360-80c2-4065-a782-f3172609cb2a-config-data" (OuterVolumeSpecName: "config-data") pod "53646360-80c2-4065-a782-f3172609cb2a" (UID: "53646360-80c2-4065-a782-f3172609cb2a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:48:00 crc kubenswrapper[4685]: I0128 12:48:00.317937 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53646360-80c2-4065-a782-f3172609cb2a-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:00 crc kubenswrapper[4685]: I0128 12:48:00.318010 4685 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/53646360-80c2-4065-a782-f3172609cb2a-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:00 crc kubenswrapper[4685]: I0128 12:48:00.318027 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rvr4n\" (UniqueName: \"kubernetes.io/projected/53646360-80c2-4065-a782-f3172609cb2a-kube-api-access-rvr4n\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:00 crc kubenswrapper[4685]: I0128 12:48:00.867312 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-bvrmc" event={"ID":"53646360-80c2-4065-a782-f3172609cb2a","Type":"ContainerDied","Data":"358ace4929523664b1435c421d6355fd467f0c3fc2fdbf65a8a17f7946e17f41"} Jan 28 12:48:00 crc kubenswrapper[4685]: I0128 12:48:00.867367 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="358ace4929523664b1435c421d6355fd467f0c3fc2fdbf65a8a17f7946e17f41" Jan 28 12:48:00 crc kubenswrapper[4685]: I0128 12:48:00.867382 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-bvrmc" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.141452 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-single-1"] Jan 28 12:48:02 crc kubenswrapper[4685]: E0128 12:48:02.142112 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53646360-80c2-4065-a782-f3172609cb2a" containerName="glance-db-sync" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.142130 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="53646360-80c2-4065-a782-f3172609cb2a" containerName="glance-db-sync" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.142300 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="53646360-80c2-4065-a782-f3172609cb2a" containerName="glance-db-sync" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.147252 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.150912 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-scripts" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.151351 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-default-single-config-data" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.151849 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-glance-dockercfg-684br" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.156675 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-1"] Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.219385 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.221073 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.228974 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.244524 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.346359 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ce87fba-cdd7-4110-aac2-14c174e7e46d-config-data\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.346417 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-lib-modules\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.346440 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-etc-iscsi\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.346473 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-lib-modules\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.346517 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-etc-iscsi\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.346538 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2ce87fba-cdd7-4110-aac2-14c174e7e46d-httpd-run\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.346559 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-etc-nvme\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.346582 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-sys\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.346603 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-etc-nvme\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.346624 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-scripts\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.346784 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.346878 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.346917 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-var-locks-brick\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.346941 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-dev\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.347001 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-config-data\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.347028 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-logs\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.347048 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: 
\"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-run\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.347117 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-var-locks-brick\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.347137 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ce87fba-cdd7-4110-aac2-14c174e7e46d-scripts\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.347226 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-run\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.347287 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-httpd-run\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.347335 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.347358 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vl4ct\" (UniqueName: \"kubernetes.io/projected/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-kube-api-access-vl4ct\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.347398 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hx8v\" (UniqueName: \"kubernetes.io/projected/2ce87fba-cdd7-4110-aac2-14c174e7e46d-kube-api-access-6hx8v\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.347438 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ce87fba-cdd7-4110-aac2-14c174e7e46d-logs\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.347479 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" 
(UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-sys\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.347520 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.347572 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-dev\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.347763 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") device mount path \"/mnt/openstack/pv17\"" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.376538 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.448430 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-sys\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.448482 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.448514 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-dev\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.448528 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-sys\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.448541 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ce87fba-cdd7-4110-aac2-14c174e7e46d-config-data\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " 
pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.448615 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-lib-modules\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.448648 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-etc-iscsi\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.448695 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-lib-modules\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.448738 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-etc-iscsi\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.448758 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-etc-nvme\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.448775 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2ce87fba-cdd7-4110-aac2-14c174e7e46d-httpd-run\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.448800 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-sys\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.448821 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-etc-nvme\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.448835 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-scripts\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.448867 4685 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.448892 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.448915 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-var-locks-brick\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.448913 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-etc-iscsi\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.448948 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-dev\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.448932 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-dev\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.449008 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-config-data\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.449044 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-logs\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.449071 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-run\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.449117 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-var-locks-brick\") pod \"glance-default-single-0\" (UID: 
\"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.449144 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ce87fba-cdd7-4110-aac2-14c174e7e46d-scripts\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.449199 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-run\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.449270 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-httpd-run\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.449321 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vl4ct\" (UniqueName: \"kubernetes.io/projected/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-kube-api-access-vl4ct\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.449353 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hx8v\" (UniqueName: \"kubernetes.io/projected/2ce87fba-cdd7-4110-aac2-14c174e7e46d-kube-api-access-6hx8v\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.449400 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ce87fba-cdd7-4110-aac2-14c174e7e46d-logs\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.449501 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2ce87fba-cdd7-4110-aac2-14c174e7e46d-httpd-run\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.449537 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-sys\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.449566 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-etc-nvme\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.449929 4685 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-run\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.449942 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ce87fba-cdd7-4110-aac2-14c174e7e46d-logs\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.449967 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-lib-modules\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.449990 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-etc-iscsi\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.450009 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-lib-modules\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.450029 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-run\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.450144 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") device mount path \"/mnt/openstack/pv08\"" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.450300 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-var-locks-brick\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.450384 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") device mount path \"/mnt/openstack/pv20\"" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.450411 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-httpd-run\") 
pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.450465 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-var-locks-brick\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.450586 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") device mount path \"/mnt/openstack/pv09\"" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.449045 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-etc-nvme\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.451109 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-logs\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.451120 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-dev\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.459918 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ce87fba-cdd7-4110-aac2-14c174e7e46d-scripts\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.460965 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ce87fba-cdd7-4110-aac2-14c174e7e46d-config-data\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.472253 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-scripts\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.474737 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vl4ct\" (UniqueName: \"kubernetes.io/projected/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-kube-api-access-vl4ct\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 
12:48:02.476632 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hx8v\" (UniqueName: \"kubernetes.io/projected/2ce87fba-cdd7-4110-aac2-14c174e7e46d-kube-api-access-6hx8v\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.477379 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.478898 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-config-data\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.481803 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"glance-default-single-0\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.503102 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-single-1\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.537635 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0"
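
The local-storage PVs above go through the two-phase mount path that the configmap, secret, and host-path volumes skip: MountVolume.MountDevice first reports the node-level device mount path (for example /mnt/openstack/pv17), and only then does MountVolume.SetUp make the volume available to the pod. A short sketch that recovers the PV-to-device-path mapping from records like these (best-effort regex over a log on stdin, not part of the kubelet):

import re
import sys

DEV = re.compile(r'MountVolume\.MountDevice succeeded for volume \\"([^"\\]+)\\"'
                 r'.*?device mount path \\"([^"\\]+)\\"')

for line in sys.stdin:
    for m in DEV.finditer(line):
        print(m.group(1), "->", m.group(2))
# Expected from the records above:
#   local-storage17-crc -> /mnt/openstack/pv17
#   local-storage08-crc -> /mnt/openstack/pv08
#   local-storage20-crc -> /mnt/openstack/pv20
#   local-storage09-crc -> /mnt/openstack/pv09
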
Need to start a new one" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:02 crc kubenswrapper[4685]: I0128 12:48:02.948891 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:48:03 crc kubenswrapper[4685]: I0128 12:48:03.200331 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-1"] Jan 28 12:48:03 crc kubenswrapper[4685]: I0128 12:48:03.546047 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:48:03 crc kubenswrapper[4685]: E0128 12:48:03.546847 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:48:03 crc kubenswrapper[4685]: I0128 12:48:03.890487 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-1" event={"ID":"2ce87fba-cdd7-4110-aac2-14c174e7e46d","Type":"ContainerStarted","Data":"fe361c01f8525b35fc5882a549ff231bdb9c4ac71f57986fdc5473d46deb3bb9"} Jan 28 12:48:03 crc kubenswrapper[4685]: I0128 12:48:03.890855 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-1" event={"ID":"2ce87fba-cdd7-4110-aac2-14c174e7e46d","Type":"ContainerStarted","Data":"af162de234e737d20eb7d2a2aa60f1e7f278bb064cb1f90e1339ae095c8f10ed"} Jan 28 12:48:03 crc kubenswrapper[4685]: I0128 12:48:03.890872 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-1" event={"ID":"2ce87fba-cdd7-4110-aac2-14c174e7e46d","Type":"ContainerStarted","Data":"4e3428c6662e57adc21db572f7f532f6cacd59813dd3d7259a6040ba4b3d2737"} Jan 28 12:48:03 crc kubenswrapper[4685]: I0128 12:48:03.893192 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4","Type":"ContainerStarted","Data":"598cc903becf3fee5c74081d1912af8c207d621c2d547575eb7dc889b5ccb598"} Jan 28 12:48:03 crc kubenswrapper[4685]: I0128 12:48:03.893399 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4","Type":"ContainerStarted","Data":"bbea7998d370cf9b7d1f9257e48712f0a2c7f7b8d3d2d4156b93ea3a4f6d3e1b"} Jan 28 12:48:03 crc kubenswrapper[4685]: I0128 12:48:03.893447 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4","Type":"ContainerStarted","Data":"53b79547ee13551f46021ed1e3445192fb6cfaa743b5a1e42de2e35170c4efc2"} Jan 28 12:48:03 crc kubenswrapper[4685]: I0128 12:48:03.912680 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-single-1" podStartSLOduration=1.912666003 podStartE2EDuration="1.912666003s" podCreationTimestamp="2026-01-28 12:48:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:03.910643156 +0000 UTC m=+1634.998056991" watchObservedRunningTime="2026-01-28 12:48:03.912666003 +0000 UTC 
m=+1635.000079838" Jan 28 12:48:12 crc kubenswrapper[4685]: I0128 12:48:12.538753 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:12 crc kubenswrapper[4685]: I0128 12:48:12.539359 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:12 crc kubenswrapper[4685]: I0128 12:48:12.563544 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:12 crc kubenswrapper[4685]: I0128 12:48:12.577105 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:12 crc kubenswrapper[4685]: I0128 12:48:12.589817 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-single-0" podStartSLOduration=11.589797176 podStartE2EDuration="11.589797176s" podCreationTimestamp="2026-01-28 12:48:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:03.93446785 +0000 UTC m=+1635.021881685" watchObservedRunningTime="2026-01-28 12:48:12.589797176 +0000 UTC m=+1643.677211011" Jan 28 12:48:12 crc kubenswrapper[4685]: I0128 12:48:12.769456 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:12 crc kubenswrapper[4685]: I0128 12:48:12.769537 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:12 crc kubenswrapper[4685]: I0128 12:48:12.794600 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:12 crc kubenswrapper[4685]: I0128 12:48:12.807906 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:12 crc kubenswrapper[4685]: I0128 12:48:12.959866 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:12 crc kubenswrapper[4685]: I0128 12:48:12.959924 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:12 crc kubenswrapper[4685]: I0128 12:48:12.959935 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:12 crc kubenswrapper[4685]: I0128 12:48:12.959943 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:16 crc kubenswrapper[4685]: I0128 12:48:16.121251 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:16 crc kubenswrapper[4685]: I0128 12:48:16.121663 4685 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:48:16 crc kubenswrapper[4685]: I0128 12:48:16.128884 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:16 crc kubenswrapper[4685]: I0128 12:48:16.128985 4685 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:48:16 crc kubenswrapper[4685]: I0128 12:48:16.129998 4685 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:16 crc kubenswrapper[4685]: I0128 12:48:16.147375 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:16 crc kubenswrapper[4685]: I0128 12:48:16.189800 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:48:17 crc kubenswrapper[4685]: I0128 12:48:17.545973 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:48:17 crc kubenswrapper[4685]: E0128 12:48:17.546581 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:48:17 crc kubenswrapper[4685]: I0128 12:48:17.996137 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-0" podUID="b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" containerName="glance-log" containerID="cri-o://bbea7998d370cf9b7d1f9257e48712f0a2c7f7b8d3d2d4156b93ea3a4f6d3e1b" gracePeriod=30 Jan 28 12:48:17 crc kubenswrapper[4685]: I0128 12:48:17.996256 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-0" podUID="b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" containerName="glance-httpd" containerID="cri-o://598cc903becf3fee5c74081d1912af8c207d621c2d547575eb7dc889b5ccb598" gracePeriod=30 Jan 28 12:48:19 crc kubenswrapper[4685]: I0128 12:48:19.004012 4685 generic.go:334] "Generic (PLEG): container finished" podID="b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" containerID="bbea7998d370cf9b7d1f9257e48712f0a2c7f7b8d3d2d4156b93ea3a4f6d3e1b" exitCode=143 Jan 28 12:48:19 crc kubenswrapper[4685]: I0128 12:48:19.004366 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4","Type":"ContainerDied","Data":"bbea7998d370cf9b7d1f9257e48712f0a2c7f7b8d3d2d4156b93ea3a4f6d3e1b"} Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.027591 4685 generic.go:334] "Generic (PLEG): container finished" podID="b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" containerID="598cc903becf3fee5c74081d1912af8c207d621c2d547575eb7dc889b5ccb598" exitCode=0 Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.027665 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4","Type":"ContainerDied","Data":"598cc903becf3fee5c74081d1912af8c207d621c2d547575eb7dc889b5ccb598"} Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.028027 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4","Type":"ContainerDied","Data":"53b79547ee13551f46021ed1e3445192fb6cfaa743b5a1e42de2e35170c4efc2"} Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.028046 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="53b79547ee13551f46021ed1e3445192fb6cfaa743b5a1e42de2e35170c4efc2" Jan 28 12:48:22 crc 
kubenswrapper[4685]: I0128 12:48:22.028260 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.054298 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-run\") pod \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.054349 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-scripts\") pod \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.054382 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-httpd-run\") pod \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.054420 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-etc-nvme\") pod \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.054456 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-run" (OuterVolumeSpecName: "run") pod "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" (UID: "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.054579 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" (UID: "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.054813 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" (UID: "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.054848 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-logs\") pod \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.054918 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-var-locks-brick\") pod \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.054945 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-logs" (OuterVolumeSpecName: "logs") pod "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" (UID: "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.054994 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" (UID: "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.054950 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-sys\") pod \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.055118 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.055482 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-config-data\") pod \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.055504 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-dev\") pod \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.055552 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-etc-iscsi\") pod \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.055575 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-lib-modules\") pod 
\"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.055593 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vl4ct\" (UniqueName: \"kubernetes.io/projected/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-kube-api-access-vl4ct\") pod \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.055608 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\" (UID: \"b1f03937-57cc-4ed1-b6aa-73a1798bf0f4\") " Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.055730 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" (UID: "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.055755 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-dev" (OuterVolumeSpecName: "dev") pod "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" (UID: "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.055771 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" (UID: "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4"). InnerVolumeSpecName "lib-modules". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.056220 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.056246 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.056259 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.056270 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.056283 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.056294 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.056305 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.056315 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.058744 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-sys" (OuterVolumeSpecName: "sys") pod "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" (UID: "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.065151 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage17-crc" (OuterVolumeSpecName: "glance-cache") pod "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" (UID: "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4"). InnerVolumeSpecName "local-storage17-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.077055 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-scripts" (OuterVolumeSpecName: "scripts") pod "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" (UID: "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.077599 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage20-crc" (OuterVolumeSpecName: "glance") pod "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" (UID: "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4"). InnerVolumeSpecName "local-storage20-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.078220 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-kube-api-access-vl4ct" (OuterVolumeSpecName: "kube-api-access-vl4ct") pod "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" (UID: "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4"). InnerVolumeSpecName "kube-api-access-vl4ct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.105822 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-config-data" (OuterVolumeSpecName: "config-data") pod "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" (UID: "b1f03937-57cc-4ed1-b6aa-73a1798bf0f4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.158016 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.158077 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") on node \"crc\" " Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.158090 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.158101 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vl4ct\" (UniqueName: \"kubernetes.io/projected/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-kube-api-access-vl4ct\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.158117 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") on node \"crc\" " Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.158125 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.184095 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage17-crc" (UniqueName: "kubernetes.io/local-volume/local-storage17-crc") on node "crc" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.185198 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage20-crc" (UniqueName: "kubernetes.io/local-volume/local-storage20-crc") on node "crc" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.259412 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage17-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage17-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:22 crc kubenswrapper[4685]: I0128 12:48:22.259449 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.034735 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.061690 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.067662 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.086815 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:48:23 crc kubenswrapper[4685]: E0128 12:48:23.087109 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" containerName="glance-log" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.087128 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" containerName="glance-log" Jan 28 12:48:23 crc kubenswrapper[4685]: E0128 12:48:23.087147 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" containerName="glance-httpd" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.087154 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" containerName="glance-httpd" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.087298 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" containerName="glance-log" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.087320 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" containerName="glance-httpd" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.088033 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.098382 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.273067 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-etc-nvme\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.273114 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/352c6743-9f05-432d-a988-54558eeece77-httpd-run\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.273156 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-run\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.273194 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/352c6743-9f05-432d-a988-54558eeece77-logs\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.273216 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/352c6743-9f05-432d-a988-54558eeece77-config-data\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.273438 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.273474 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-sys\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.273515 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-dev\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.273540 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzlf7\" 
(UniqueName: \"kubernetes.io/projected/352c6743-9f05-432d-a988-54558eeece77-kube-api-access-gzlf7\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.273560 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-var-locks-brick\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.273580 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/352c6743-9f05-432d-a988-54558eeece77-scripts\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.273840 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.273899 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-etc-iscsi\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.273935 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-lib-modules\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.374995 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-run\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375046 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/352c6743-9f05-432d-a988-54558eeece77-logs\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375066 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/352c6743-9f05-432d-a988-54558eeece77-config-data\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375106 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage17-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375102 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-run\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375141 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-sys\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375207 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-dev\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375235 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzlf7\" (UniqueName: \"kubernetes.io/projected/352c6743-9f05-432d-a988-54558eeece77-kube-api-access-gzlf7\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375256 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-var-locks-brick\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375281 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/352c6743-9f05-432d-a988-54558eeece77-scripts\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375304 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-dev\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375340 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375362 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-etc-iscsi\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 
crc kubenswrapper[4685]: I0128 12:48:23.375376 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") device mount path \"/mnt/openstack/pv17\"" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375387 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-lib-modules\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375412 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-etc-nvme\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375431 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/352c6743-9f05-432d-a988-54558eeece77-httpd-run\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375365 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-var-locks-brick\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375625 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") device mount path \"/mnt/openstack/pv20\"" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375711 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-lib-modules\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375775 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-etc-iscsi\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375786 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/352c6743-9f05-432d-a988-54558eeece77-logs\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375824 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-etc-nvme\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.375885 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/352c6743-9f05-432d-a988-54558eeece77-httpd-run\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.377956 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-sys\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.380113 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/352c6743-9f05-432d-a988-54558eeece77-scripts\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.392416 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/352c6743-9f05-432d-a988-54558eeece77-config-data\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.394460 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzlf7\" (UniqueName: \"kubernetes.io/projected/352c6743-9f05-432d-a988-54558eeece77-kube-api-access-gzlf7\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.409625 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.423263 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-single-0\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:23 crc kubenswrapper[4685]: I0128 12:48:23.702310 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:24 crc kubenswrapper[4685]: I0128 12:48:24.051194 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:48:24 crc kubenswrapper[4685]: I0128 12:48:24.556336 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1f03937-57cc-4ed1-b6aa-73a1798bf0f4" path="/var/lib/kubelet/pods/b1f03937-57cc-4ed1-b6aa-73a1798bf0f4/volumes" Jan 28 12:48:25 crc kubenswrapper[4685]: I0128 12:48:25.056789 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"352c6743-9f05-432d-a988-54558eeece77","Type":"ContainerStarted","Data":"cdd87685d25b5c43e6bc80c99cf0858d416e93cb690dd217b9ef83ed3b064f8e"} Jan 28 12:48:25 crc kubenswrapper[4685]: I0128 12:48:25.057045 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"352c6743-9f05-432d-a988-54558eeece77","Type":"ContainerStarted","Data":"6e2abcf92a349a3fc13bd2677b272188f57d3d827097c66147255c6c3394ca04"} Jan 28 12:48:25 crc kubenswrapper[4685]: I0128 12:48:25.057056 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"352c6743-9f05-432d-a988-54558eeece77","Type":"ContainerStarted","Data":"4f5caef80ed9a3c698dcc39fc0125f14ff5d5a18779ff35dde6608173a3a5d2d"} Jan 28 12:48:25 crc kubenswrapper[4685]: I0128 12:48:25.081337 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-single-0" podStartSLOduration=2.081315879 podStartE2EDuration="2.081315879s" podCreationTimestamp="2026-01-28 12:48:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:25.077889072 +0000 UTC m=+1656.165302917" watchObservedRunningTime="2026-01-28 12:48:25.081315879 +0000 UTC m=+1656.168729714" Jan 28 12:48:28 crc kubenswrapper[4685]: I0128 12:48:28.389503 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bfldq"] Jan 28 12:48:28 crc kubenswrapper[4685]: I0128 12:48:28.391610 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bfldq" Jan 28 12:48:28 crc kubenswrapper[4685]: I0128 12:48:28.414760 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bfldq"] Jan 28 12:48:28 crc kubenswrapper[4685]: I0128 12:48:28.453159 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65420e05-082f-4ad4-94f9-01079a28394b-utilities\") pod \"redhat-operators-bfldq\" (UID: \"65420e05-082f-4ad4-94f9-01079a28394b\") " pod="openshift-marketplace/redhat-operators-bfldq" Jan 28 12:48:28 crc kubenswrapper[4685]: I0128 12:48:28.453272 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65420e05-082f-4ad4-94f9-01079a28394b-catalog-content\") pod \"redhat-operators-bfldq\" (UID: \"65420e05-082f-4ad4-94f9-01079a28394b\") " pod="openshift-marketplace/redhat-operators-bfldq" Jan 28 12:48:28 crc kubenswrapper[4685]: I0128 12:48:28.453292 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzmts\" (UniqueName: \"kubernetes.io/projected/65420e05-082f-4ad4-94f9-01079a28394b-kube-api-access-bzmts\") pod \"redhat-operators-bfldq\" (UID: \"65420e05-082f-4ad4-94f9-01079a28394b\") " pod="openshift-marketplace/redhat-operators-bfldq" Jan 28 12:48:28 crc kubenswrapper[4685]: I0128 12:48:28.554516 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65420e05-082f-4ad4-94f9-01079a28394b-utilities\") pod \"redhat-operators-bfldq\" (UID: \"65420e05-082f-4ad4-94f9-01079a28394b\") " pod="openshift-marketplace/redhat-operators-bfldq" Jan 28 12:48:28 crc kubenswrapper[4685]: I0128 12:48:28.554596 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65420e05-082f-4ad4-94f9-01079a28394b-catalog-content\") pod \"redhat-operators-bfldq\" (UID: \"65420e05-082f-4ad4-94f9-01079a28394b\") " pod="openshift-marketplace/redhat-operators-bfldq" Jan 28 12:48:28 crc kubenswrapper[4685]: I0128 12:48:28.554616 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzmts\" (UniqueName: \"kubernetes.io/projected/65420e05-082f-4ad4-94f9-01079a28394b-kube-api-access-bzmts\") pod \"redhat-operators-bfldq\" (UID: \"65420e05-082f-4ad4-94f9-01079a28394b\") " pod="openshift-marketplace/redhat-operators-bfldq" Jan 28 12:48:28 crc kubenswrapper[4685]: I0128 12:48:28.555460 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65420e05-082f-4ad4-94f9-01079a28394b-utilities\") pod \"redhat-operators-bfldq\" (UID: \"65420e05-082f-4ad4-94f9-01079a28394b\") " pod="openshift-marketplace/redhat-operators-bfldq" Jan 28 12:48:28 crc kubenswrapper[4685]: I0128 12:48:28.555910 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65420e05-082f-4ad4-94f9-01079a28394b-catalog-content\") pod \"redhat-operators-bfldq\" (UID: \"65420e05-082f-4ad4-94f9-01079a28394b\") " pod="openshift-marketplace/redhat-operators-bfldq" Jan 28 12:48:28 crc kubenswrapper[4685]: I0128 12:48:28.584689 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-bzmts\" (UniqueName: \"kubernetes.io/projected/65420e05-082f-4ad4-94f9-01079a28394b-kube-api-access-bzmts\") pod \"redhat-operators-bfldq\" (UID: \"65420e05-082f-4ad4-94f9-01079a28394b\") " pod="openshift-marketplace/redhat-operators-bfldq" Jan 28 12:48:28 crc kubenswrapper[4685]: I0128 12:48:28.710071 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bfldq" Jan 28 12:48:29 crc kubenswrapper[4685]: I0128 12:48:29.129590 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bfldq"] Jan 28 12:48:30 crc kubenswrapper[4685]: I0128 12:48:30.093228 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bfldq" event={"ID":"65420e05-082f-4ad4-94f9-01079a28394b","Type":"ContainerDied","Data":"d202d0dda2aa02bc11b72461d23fd0671a634f88b554a896af42bb930ccd72a6"} Jan 28 12:48:30 crc kubenswrapper[4685]: I0128 12:48:30.093700 4685 generic.go:334] "Generic (PLEG): container finished" podID="65420e05-082f-4ad4-94f9-01079a28394b" containerID="d202d0dda2aa02bc11b72461d23fd0671a634f88b554a896af42bb930ccd72a6" exitCode=0 Jan 28 12:48:30 crc kubenswrapper[4685]: I0128 12:48:30.093753 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bfldq" event={"ID":"65420e05-082f-4ad4-94f9-01079a28394b","Type":"ContainerStarted","Data":"e2e0fdb1b2921357935a75fd1f63f74f75b06e4c06d195f450c245d7ef09cef8"} Jan 28 12:48:30 crc kubenswrapper[4685]: I0128 12:48:30.550241 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:48:30 crc kubenswrapper[4685]: E0128 12:48:30.550761 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:48:33 crc kubenswrapper[4685]: I0128 12:48:33.703589 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:33 crc kubenswrapper[4685]: I0128 12:48:33.703705 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:33 crc kubenswrapper[4685]: I0128 12:48:33.738320 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:33 crc kubenswrapper[4685]: I0128 12:48:33.742821 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:34 crc kubenswrapper[4685]: I0128 12:48:34.136527 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:34 crc kubenswrapper[4685]: I0128 12:48:34.136570 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:36 crc kubenswrapper[4685]: I0128 12:48:36.166936 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:36 crc kubenswrapper[4685]: I0128 12:48:36.167049 4685 
prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:48:36 crc kubenswrapper[4685]: I0128 12:48:36.190794 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:42 crc kubenswrapper[4685]: I0128 12:48:42.192150 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bfldq" event={"ID":"65420e05-082f-4ad4-94f9-01079a28394b","Type":"ContainerStarted","Data":"48f780827a237fd945279ac547addac7791350aa27855e06a77be020a5ae0079"} Jan 28 12:48:43 crc kubenswrapper[4685]: I0128 12:48:43.201212 4685 generic.go:334] "Generic (PLEG): container finished" podID="65420e05-082f-4ad4-94f9-01079a28394b" containerID="48f780827a237fd945279ac547addac7791350aa27855e06a77be020a5ae0079" exitCode=0 Jan 28 12:48:43 crc kubenswrapper[4685]: I0128 12:48:43.201274 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bfldq" event={"ID":"65420e05-082f-4ad4-94f9-01079a28394b","Type":"ContainerDied","Data":"48f780827a237fd945279ac547addac7791350aa27855e06a77be020a5ae0079"} Jan 28 12:48:43 crc kubenswrapper[4685]: I0128 12:48:43.203687 4685 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 12:48:43 crc kubenswrapper[4685]: I0128 12:48:43.546721 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:48:43 crc kubenswrapper[4685]: E0128 12:48:43.547263 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:48:45 crc kubenswrapper[4685]: I0128 12:48:45.216704 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bfldq" event={"ID":"65420e05-082f-4ad4-94f9-01079a28394b","Type":"ContainerStarted","Data":"500716302b8d003061bd242fabad5c16b164c2c97115f7a2b1809bade0c3b796"} Jan 28 12:48:45 crc kubenswrapper[4685]: I0128 12:48:45.237549 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bfldq" podStartSLOduration=3.734840896 podStartE2EDuration="17.237524415s" podCreationTimestamp="2026-01-28 12:48:28 +0000 UTC" firstStartedPulling="2026-01-28 12:48:30.094449631 +0000 UTC m=+1661.181863466" lastFinishedPulling="2026-01-28 12:48:43.59713315 +0000 UTC m=+1674.684546985" observedRunningTime="2026-01-28 12:48:45.233847421 +0000 UTC m=+1676.321261256" watchObservedRunningTime="2026-01-28 12:48:45.237524415 +0000 UTC m=+1676.324938260" Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.472995 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-db-sync-bvrmc"] Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.480987 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-db-sync-bvrmc"] Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.555800 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53646360-80c2-4065-a782-f3172609cb2a" path="/var/lib/kubelet/pods/53646360-80c2-4065-a782-f3172609cb2a/volumes" Jan 28 12:48:48 crc 
Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.577662 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-db-sync-fqrjs"]
Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.579016 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-fqrjs"
Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.581433 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"combined-ca-bundle"
Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.581629 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-config-data"
Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.584117 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-sync-fqrjs"]
Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.711159 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bfldq"
Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.711214 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bfldq"
Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.765542 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-combined-ca-bundle\") pod \"glance-db-sync-fqrjs\" (UID: \"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6\") " pod="glance-kuttl-tests/glance-db-sync-fqrjs"
Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.765974 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-db-sync-config-data\") pod \"glance-db-sync-fqrjs\" (UID: \"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6\") " pod="glance-kuttl-tests/glance-db-sync-fqrjs"
Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.766024 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-config-data\") pod \"glance-db-sync-fqrjs\" (UID: \"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6\") " pod="glance-kuttl-tests/glance-db-sync-fqrjs"
Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.766070 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqzwq\" (UniqueName: \"kubernetes.io/projected/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-kube-api-access-jqzwq\") pod \"glance-db-sync-fqrjs\" (UID: \"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6\") " pod="glance-kuttl-tests/glance-db-sync-fqrjs"
Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.867342 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-combined-ca-bundle\") pod \"glance-db-sync-fqrjs\" (UID: \"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6\") " pod="glance-kuttl-tests/glance-db-sync-fqrjs"
Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.867413 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-db-sync-config-data\") pod \"glance-db-sync-fqrjs\" (UID: 
\"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6\") " pod="glance-kuttl-tests/glance-db-sync-fqrjs" Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.867434 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-config-data\") pod \"glance-db-sync-fqrjs\" (UID: \"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6\") " pod="glance-kuttl-tests/glance-db-sync-fqrjs" Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.867462 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqzwq\" (UniqueName: \"kubernetes.io/projected/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-kube-api-access-jqzwq\") pod \"glance-db-sync-fqrjs\" (UID: \"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6\") " pod="glance-kuttl-tests/glance-db-sync-fqrjs" Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.874490 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-config-data\") pod \"glance-db-sync-fqrjs\" (UID: \"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6\") " pod="glance-kuttl-tests/glance-db-sync-fqrjs" Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.879333 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-db-sync-config-data\") pod \"glance-db-sync-fqrjs\" (UID: \"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6\") " pod="glance-kuttl-tests/glance-db-sync-fqrjs" Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.886625 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-combined-ca-bundle\") pod \"glance-db-sync-fqrjs\" (UID: \"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6\") " pod="glance-kuttl-tests/glance-db-sync-fqrjs" Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.891597 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqzwq\" (UniqueName: \"kubernetes.io/projected/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-kube-api-access-jqzwq\") pod \"glance-db-sync-fqrjs\" (UID: \"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6\") " pod="glance-kuttl-tests/glance-db-sync-fqrjs" Jan 28 12:48:48 crc kubenswrapper[4685]: I0128 12:48:48.902569 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-fqrjs" Jan 28 12:48:49 crc kubenswrapper[4685]: I0128 12:48:49.160679 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-sync-fqrjs"] Jan 28 12:48:49 crc kubenswrapper[4685]: I0128 12:48:49.253033 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-fqrjs" event={"ID":"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6","Type":"ContainerStarted","Data":"10a2e00b139b64ee6daef33c0699ed05ebb748a9cdd52c7841b0582ce6feabd8"} Jan 28 12:48:49 crc kubenswrapper[4685]: I0128 12:48:49.752129 4685 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bfldq" podUID="65420e05-082f-4ad4-94f9-01079a28394b" containerName="registry-server" probeResult="failure" output=< Jan 28 12:48:49 crc kubenswrapper[4685]: timeout: failed to connect service ":50051" within 1s Jan 28 12:48:49 crc kubenswrapper[4685]: > Jan 28 12:48:50 crc kubenswrapper[4685]: I0128 12:48:50.261486 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-fqrjs" event={"ID":"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6","Type":"ContainerStarted","Data":"367e689a8258a1fb3096eeaefbc08c5ab298b0bab15a7d608dcf303260e6e3d0"} Jan 28 12:48:50 crc kubenswrapper[4685]: I0128 12:48:50.284587 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-db-sync-fqrjs" podStartSLOduration=2.284567203 podStartE2EDuration="2.284567203s" podCreationTimestamp="2026-01-28 12:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:48:50.275910229 +0000 UTC m=+1681.363324074" watchObservedRunningTime="2026-01-28 12:48:50.284567203 +0000 UTC m=+1681.371981038" Jan 28 12:48:53 crc kubenswrapper[4685]: I0128 12:48:53.283115 4685 generic.go:334] "Generic (PLEG): container finished" podID="90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6" containerID="367e689a8258a1fb3096eeaefbc08c5ab298b0bab15a7d608dcf303260e6e3d0" exitCode=0 Jan 28 12:48:53 crc kubenswrapper[4685]: I0128 12:48:53.283212 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-fqrjs" event={"ID":"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6","Type":"ContainerDied","Data":"367e689a8258a1fb3096eeaefbc08c5ab298b0bab15a7d608dcf303260e6e3d0"} Jan 28 12:48:54 crc kubenswrapper[4685]: I0128 12:48:54.559929 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-fqrjs" Jan 28 12:48:54 crc kubenswrapper[4685]: I0128 12:48:54.664083 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jqzwq\" (UniqueName: \"kubernetes.io/projected/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-kube-api-access-jqzwq\") pod \"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6\" (UID: \"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6\") " Jan 28 12:48:54 crc kubenswrapper[4685]: I0128 12:48:54.664226 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-combined-ca-bundle\") pod \"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6\" (UID: \"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6\") " Jan 28 12:48:54 crc kubenswrapper[4685]: I0128 12:48:54.664321 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-config-data\") pod \"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6\" (UID: \"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6\") " Jan 28 12:48:54 crc kubenswrapper[4685]: I0128 12:48:54.664406 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-db-sync-config-data\") pod \"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6\" (UID: \"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6\") " Jan 28 12:48:54 crc kubenswrapper[4685]: I0128 12:48:54.671121 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6" (UID: "90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:48:54 crc kubenswrapper[4685]: I0128 12:48:54.682353 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-kube-api-access-jqzwq" (OuterVolumeSpecName: "kube-api-access-jqzwq") pod "90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6" (UID: "90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6"). InnerVolumeSpecName "kube-api-access-jqzwq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:48:54 crc kubenswrapper[4685]: I0128 12:48:54.686638 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6" (UID: "90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:48:54 crc kubenswrapper[4685]: I0128 12:48:54.701210 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-config-data" (OuterVolumeSpecName: "config-data") pod "90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6" (UID: "90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:48:54 crc kubenswrapper[4685]: I0128 12:48:54.767000 4685 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:54 crc kubenswrapper[4685]: I0128 12:48:54.767050 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:54 crc kubenswrapper[4685]: I0128 12:48:54.767103 4685 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:54 crc kubenswrapper[4685]: I0128 12:48:54.767121 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jqzwq\" (UniqueName: \"kubernetes.io/projected/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6-kube-api-access-jqzwq\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:55 crc kubenswrapper[4685]: I0128 12:48:55.301896 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-fqrjs" event={"ID":"90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6","Type":"ContainerDied","Data":"10a2e00b139b64ee6daef33c0699ed05ebb748a9cdd52c7841b0582ce6feabd8"} Jan 28 12:48:55 crc kubenswrapper[4685]: I0128 12:48:55.301946 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="10a2e00b139b64ee6daef33c0699ed05ebb748a9cdd52c7841b0582ce6feabd8" Jan 28 12:48:55 crc kubenswrapper[4685]: I0128 12:48:55.301975 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-fqrjs" Jan 28 12:48:55 crc kubenswrapper[4685]: I0128 12:48:55.470533 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-1"] Jan 28 12:48:55 crc kubenswrapper[4685]: I0128 12:48:55.471068 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-1" podUID="2ce87fba-cdd7-4110-aac2-14c174e7e46d" containerName="glance-log" containerID="cri-o://af162de234e737d20eb7d2a2aa60f1e7f278bb064cb1f90e1339ae095c8f10ed" gracePeriod=30 Jan 28 12:48:55 crc kubenswrapper[4685]: I0128 12:48:55.471230 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-1" podUID="2ce87fba-cdd7-4110-aac2-14c174e7e46d" containerName="glance-httpd" containerID="cri-o://fe361c01f8525b35fc5882a549ff231bdb9c4ac71f57986fdc5473d46deb3bb9" gracePeriod=30 Jan 28 12:48:55 crc kubenswrapper[4685]: I0128 12:48:55.483222 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:48:55 crc kubenswrapper[4685]: I0128 12:48:55.483605 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-0" podUID="352c6743-9f05-432d-a988-54558eeece77" containerName="glance-log" containerID="cri-o://6e2abcf92a349a3fc13bd2677b272188f57d3d827097c66147255c6c3394ca04" gracePeriod=30 Jan 28 12:48:55 crc kubenswrapper[4685]: I0128 12:48:55.483854 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-0" podUID="352c6743-9f05-432d-a988-54558eeece77" containerName="glance-httpd" containerID="cri-o://cdd87685d25b5c43e6bc80c99cf0858d416e93cb690dd217b9ef83ed3b064f8e" gracePeriod=30 Jan 28 12:48:56 crc kubenswrapper[4685]: I0128 12:48:56.312357 4685 generic.go:334] "Generic (PLEG): container finished" podID="2ce87fba-cdd7-4110-aac2-14c174e7e46d" containerID="af162de234e737d20eb7d2a2aa60f1e7f278bb064cb1f90e1339ae095c8f10ed" exitCode=143 Jan 28 12:48:56 crc kubenswrapper[4685]: I0128 12:48:56.312430 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-1" event={"ID":"2ce87fba-cdd7-4110-aac2-14c174e7e46d","Type":"ContainerDied","Data":"af162de234e737d20eb7d2a2aa60f1e7f278bb064cb1f90e1339ae095c8f10ed"} Jan 28 12:48:56 crc kubenswrapper[4685]: I0128 12:48:56.314528 4685 generic.go:334] "Generic (PLEG): container finished" podID="352c6743-9f05-432d-a988-54558eeece77" containerID="6e2abcf92a349a3fc13bd2677b272188f57d3d827097c66147255c6c3394ca04" exitCode=143 Jan 28 12:48:56 crc kubenswrapper[4685]: I0128 12:48:56.314551 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"352c6743-9f05-432d-a988-54558eeece77","Type":"ContainerDied","Data":"6e2abcf92a349a3fc13bd2677b272188f57d3d827097c66147255c6c3394ca04"} Jan 28 12:48:58 crc kubenswrapper[4685]: I0128 12:48:58.546508 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:48:58 crc kubenswrapper[4685]: E0128 12:48:58.547287 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Jan 28 12:48:58 crc kubenswrapper[4685]: E0128 12:48:58.547287 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1"
Jan 28 12:48:58 crc kubenswrapper[4685]: I0128 12:48:58.765013 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bfldq"
Jan 28 12:48:58 crc kubenswrapper[4685]: I0128 12:48:58.818941 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bfldq"
Jan 28 12:48:58 crc kubenswrapper[4685]: I0128 12:48:58.963263 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-1"
Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.051500 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-etc-nvme\") pod \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") "
Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.051583 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-sys\") pod \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") "
Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.051610 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hx8v\" (UniqueName: \"kubernetes.io/projected/2ce87fba-cdd7-4110-aac2-14c174e7e46d-kube-api-access-6hx8v\") pod \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") "
Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.051638 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-etc-iscsi\") pod \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") "
Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.051633 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "2ce87fba-cdd7-4110-aac2-14c174e7e46d" (UID: "2ce87fba-cdd7-4110-aac2-14c174e7e46d"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.051657 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-run\") pod \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") "
Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.051698 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-run" (OuterVolumeSpecName: "run") pod "2ce87fba-cdd7-4110-aac2-14c174e7e46d" (UID: "2ce87fba-cdd7-4110-aac2-14c174e7e46d"). InnerVolumeSpecName "run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.051697 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-sys" (OuterVolumeSpecName: "sys") pod "2ce87fba-cdd7-4110-aac2-14c174e7e46d" (UID: "2ce87fba-cdd7-4110-aac2-14c174e7e46d"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.051729 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-dev\") pod \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.051746 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "2ce87fba-cdd7-4110-aac2-14c174e7e46d" (UID: "2ce87fba-cdd7-4110-aac2-14c174e7e46d"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.051772 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.051821 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-dev" (OuterVolumeSpecName: "dev") pod "2ce87fba-cdd7-4110-aac2-14c174e7e46d" (UID: "2ce87fba-cdd7-4110-aac2-14c174e7e46d"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.051848 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-var-locks-brick\") pod \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.051890 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2ce87fba-cdd7-4110-aac2-14c174e7e46d-httpd-run\") pod \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.051925 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "2ce87fba-cdd7-4110-aac2-14c174e7e46d" (UID: "2ce87fba-cdd7-4110-aac2-14c174e7e46d"). InnerVolumeSpecName "var-locks-brick". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.051928 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ce87fba-cdd7-4110-aac2-14c174e7e46d-logs\") pod \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.052005 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-lib-modules\") pod \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.052036 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ce87fba-cdd7-4110-aac2-14c174e7e46d-scripts\") pod \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.052061 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.052090 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ce87fba-cdd7-4110-aac2-14c174e7e46d-config-data\") pod \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\" (UID: \"2ce87fba-cdd7-4110-aac2-14c174e7e46d\") " Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.052095 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "2ce87fba-cdd7-4110-aac2-14c174e7e46d" (UID: "2ce87fba-cdd7-4110-aac2-14c174e7e46d"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.052236 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ce87fba-cdd7-4110-aac2-14c174e7e46d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "2ce87fba-cdd7-4110-aac2-14c174e7e46d" (UID: "2ce87fba-cdd7-4110-aac2-14c174e7e46d"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.052314 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ce87fba-cdd7-4110-aac2-14c174e7e46d-logs" (OuterVolumeSpecName: "logs") pod "2ce87fba-cdd7-4110-aac2-14c174e7e46d" (UID: "2ce87fba-cdd7-4110-aac2-14c174e7e46d"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.052495 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.052508 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.052518 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.052526 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.052535 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.052542 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2ce87fba-cdd7-4110-aac2-14c174e7e46d-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.052550 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ce87fba-cdd7-4110-aac2-14c174e7e46d-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.052558 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.052566 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/2ce87fba-cdd7-4110-aac2-14c174e7e46d-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.058688 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "2ce87fba-cdd7-4110-aac2-14c174e7e46d" (UID: "2ce87fba-cdd7-4110-aac2-14c174e7e46d"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.059051 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance-cache") pod "2ce87fba-cdd7-4110-aac2-14c174e7e46d" (UID: "2ce87fba-cdd7-4110-aac2-14c174e7e46d"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.059288 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ce87fba-cdd7-4110-aac2-14c174e7e46d-kube-api-access-6hx8v" (OuterVolumeSpecName: "kube-api-access-6hx8v") pod "2ce87fba-cdd7-4110-aac2-14c174e7e46d" (UID: "2ce87fba-cdd7-4110-aac2-14c174e7e46d"). 
InnerVolumeSpecName "kube-api-access-6hx8v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.059330 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ce87fba-cdd7-4110-aac2-14c174e7e46d-scripts" (OuterVolumeSpecName: "scripts") pod "2ce87fba-cdd7-4110-aac2-14c174e7e46d" (UID: "2ce87fba-cdd7-4110-aac2-14c174e7e46d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.063989 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.108403 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ce87fba-cdd7-4110-aac2-14c174e7e46d-config-data" (OuterVolumeSpecName: "config-data") pod "2ce87fba-cdd7-4110-aac2-14c174e7e46d" (UID: "2ce87fba-cdd7-4110-aac2-14c174e7e46d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.153658 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"352c6743-9f05-432d-a988-54558eeece77\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.153718 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-run\") pod \"352c6743-9f05-432d-a988-54558eeece77\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.153751 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/352c6743-9f05-432d-a988-54558eeece77-scripts\") pod \"352c6743-9f05-432d-a988-54558eeece77\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.153791 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzlf7\" (UniqueName: \"kubernetes.io/projected/352c6743-9f05-432d-a988-54558eeece77-kube-api-access-gzlf7\") pod \"352c6743-9f05-432d-a988-54558eeece77\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.153808 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-lib-modules\") pod \"352c6743-9f05-432d-a988-54558eeece77\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.153849 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-etc-iscsi\") pod \"352c6743-9f05-432d-a988-54558eeece77\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.153873 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"352c6743-9f05-432d-a988-54558eeece77\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") " Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 
Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.153905 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/352c6743-9f05-432d-a988-54558eeece77-httpd-run\") pod \"352c6743-9f05-432d-a988-54558eeece77\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") "
Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.153926 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/352c6743-9f05-432d-a988-54558eeece77-config-data\") pod \"352c6743-9f05-432d-a988-54558eeece77\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") "
Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.153948 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-sys\") pod \"352c6743-9f05-432d-a988-54558eeece77\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") "
Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.153971 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-var-locks-brick\") pod \"352c6743-9f05-432d-a988-54558eeece77\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") "
Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.154001 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-etc-nvme\") pod \"352c6743-9f05-432d-a988-54558eeece77\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") "
Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.154023 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/352c6743-9f05-432d-a988-54558eeece77-logs\") pod \"352c6743-9f05-432d-a988-54558eeece77\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") "
Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.154041 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-dev\") pod \"352c6743-9f05-432d-a988-54558eeece77\" (UID: \"352c6743-9f05-432d-a988-54558eeece77\") "
Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.154349 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ce87fba-cdd7-4110-aac2-14c174e7e46d-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.154371 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" "
Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.154381 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ce87fba-cdd7-4110-aac2-14c174e7e46d-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.154390 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hx8v\" (UniqueName: \"kubernetes.io/projected/2ce87fba-cdd7-4110-aac2-14c174e7e46d-kube-api-access-6hx8v\") on node \"crc\" DevicePath \"\""
Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.154406 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" 
(UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.155266 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "352c6743-9f05-432d-a988-54558eeece77" (UID: "352c6743-9f05-432d-a988-54558eeece77"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.155276 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/352c6743-9f05-432d-a988-54558eeece77-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "352c6743-9f05-432d-a988-54558eeece77" (UID: "352c6743-9f05-432d-a988-54558eeece77"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.155822 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-run" (OuterVolumeSpecName: "run") pod "352c6743-9f05-432d-a988-54558eeece77" (UID: "352c6743-9f05-432d-a988-54558eeece77"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.155890 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "352c6743-9f05-432d-a988-54558eeece77" (UID: "352c6743-9f05-432d-a988-54558eeece77"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.155914 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-sys" (OuterVolumeSpecName: "sys") pod "352c6743-9f05-432d-a988-54558eeece77" (UID: "352c6743-9f05-432d-a988-54558eeece77"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.155940 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "352c6743-9f05-432d-a988-54558eeece77" (UID: "352c6743-9f05-432d-a988-54558eeece77"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.155964 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-dev" (OuterVolumeSpecName: "dev") pod "352c6743-9f05-432d-a988-54558eeece77" (UID: "352c6743-9f05-432d-a988-54558eeece77"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.155847 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "352c6743-9f05-432d-a988-54558eeece77" (UID: "352c6743-9f05-432d-a988-54558eeece77"). InnerVolumeSpecName "etc-iscsi". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.156246 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/352c6743-9f05-432d-a988-54558eeece77-logs" (OuterVolumeSpecName: "logs") pod "352c6743-9f05-432d-a988-54558eeece77" (UID: "352c6743-9f05-432d-a988-54558eeece77"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.156736 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage20-crc" (OuterVolumeSpecName: "glance") pod "352c6743-9f05-432d-a988-54558eeece77" (UID: "352c6743-9f05-432d-a988-54558eeece77"). InnerVolumeSpecName "local-storage20-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.157454 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/352c6743-9f05-432d-a988-54558eeece77-scripts" (OuterVolumeSpecName: "scripts") pod "352c6743-9f05-432d-a988-54558eeece77" (UID: "352c6743-9f05-432d-a988-54558eeece77"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.159610 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/352c6743-9f05-432d-a988-54558eeece77-kube-api-access-gzlf7" (OuterVolumeSpecName: "kube-api-access-gzlf7") pod "352c6743-9f05-432d-a988-54558eeece77" (UID: "352c6743-9f05-432d-a988-54558eeece77"). InnerVolumeSpecName "kube-api-access-gzlf7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.163898 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage17-crc" (OuterVolumeSpecName: "glance-cache") pod "352c6743-9f05-432d-a988-54558eeece77" (UID: "352c6743-9f05-432d-a988-54558eeece77"). InnerVolumeSpecName "local-storage17-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.168099 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.169411 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.191254 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/352c6743-9f05-432d-a988-54558eeece77-config-data" (OuterVolumeSpecName: "config-data") pod "352c6743-9f05-432d-a988-54558eeece77" (UID: "352c6743-9f05-432d-a988-54558eeece77"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.256235 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") on node \"crc\" " Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.260141 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.260162 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/352c6743-9f05-432d-a988-54558eeece77-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.260196 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.260209 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzlf7\" (UniqueName: \"kubernetes.io/projected/352c6743-9f05-432d-a988-54558eeece77-kube-api-access-gzlf7\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.260219 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.260229 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.260284 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") on node \"crc\" " Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.260295 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/352c6743-9f05-432d-a988-54558eeece77-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.260306 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.260315 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/352c6743-9f05-432d-a988-54558eeece77-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.260324 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.260351 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.260360 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: 
\"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.260368 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/352c6743-9f05-432d-a988-54558eeece77-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.260377 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/352c6743-9f05-432d-a988-54558eeece77-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.269773 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage17-crc" (UniqueName: "kubernetes.io/local-volume/local-storage17-crc") on node "crc" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.273038 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage20-crc" (UniqueName: "kubernetes.io/local-volume/local-storage20-crc") on node "crc" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.358855 4685 generic.go:334] "Generic (PLEG): container finished" podID="2ce87fba-cdd7-4110-aac2-14c174e7e46d" containerID="fe361c01f8525b35fc5882a549ff231bdb9c4ac71f57986fdc5473d46deb3bb9" exitCode=0 Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.358922 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.358931 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-1" event={"ID":"2ce87fba-cdd7-4110-aac2-14c174e7e46d","Type":"ContainerDied","Data":"fe361c01f8525b35fc5882a549ff231bdb9c4ac71f57986fdc5473d46deb3bb9"} Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.358986 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-1" event={"ID":"2ce87fba-cdd7-4110-aac2-14c174e7e46d","Type":"ContainerDied","Data":"4e3428c6662e57adc21db572f7f532f6cacd59813dd3d7259a6040ba4b3d2737"} Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.359006 4685 scope.go:117] "RemoveContainer" containerID="fe361c01f8525b35fc5882a549ff231bdb9c4ac71f57986fdc5473d46deb3bb9" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.362800 4685 generic.go:334] "Generic (PLEG): container finished" podID="352c6743-9f05-432d-a988-54558eeece77" containerID="cdd87685d25b5c43e6bc80c99cf0858d416e93cb690dd217b9ef83ed3b064f8e" exitCode=0 Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.362860 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.362863 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"352c6743-9f05-432d-a988-54558eeece77","Type":"ContainerDied","Data":"cdd87685d25b5c43e6bc80c99cf0858d416e93cb690dd217b9ef83ed3b064f8e"} Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.362927 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"352c6743-9f05-432d-a988-54558eeece77","Type":"ContainerDied","Data":"4f5caef80ed9a3c698dcc39fc0125f14ff5d5a18779ff35dde6608173a3a5d2d"} Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.363194 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.363418 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.399004 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-1"] Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.401553 4685 scope.go:117] "RemoveContainer" containerID="af162de234e737d20eb7d2a2aa60f1e7f278bb064cb1f90e1339ae095c8f10ed" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.407594 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-single-1"] Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.414584 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.427449 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.428356 4685 scope.go:117] "RemoveContainer" containerID="fe361c01f8525b35fc5882a549ff231bdb9c4ac71f57986fdc5473d46deb3bb9" Jan 28 12:48:59 crc kubenswrapper[4685]: E0128 12:48:59.428760 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe361c01f8525b35fc5882a549ff231bdb9c4ac71f57986fdc5473d46deb3bb9\": container with ID starting with fe361c01f8525b35fc5882a549ff231bdb9c4ac71f57986fdc5473d46deb3bb9 not found: ID does not exist" containerID="fe361c01f8525b35fc5882a549ff231bdb9c4ac71f57986fdc5473d46deb3bb9" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.428791 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe361c01f8525b35fc5882a549ff231bdb9c4ac71f57986fdc5473d46deb3bb9"} err="failed to get container status \"fe361c01f8525b35fc5882a549ff231bdb9c4ac71f57986fdc5473d46deb3bb9\": rpc error: code = NotFound desc = could not find container \"fe361c01f8525b35fc5882a549ff231bdb9c4ac71f57986fdc5473d46deb3bb9\": container with ID starting with fe361c01f8525b35fc5882a549ff231bdb9c4ac71f57986fdc5473d46deb3bb9 not found: ID does not exist" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.428833 4685 scope.go:117] "RemoveContainer" containerID="af162de234e737d20eb7d2a2aa60f1e7f278bb064cb1f90e1339ae095c8f10ed" Jan 28 12:48:59 crc kubenswrapper[4685]: E0128 12:48:59.429081 4685 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af162de234e737d20eb7d2a2aa60f1e7f278bb064cb1f90e1339ae095c8f10ed\": container with ID starting with af162de234e737d20eb7d2a2aa60f1e7f278bb064cb1f90e1339ae095c8f10ed not found: ID does not exist" containerID="af162de234e737d20eb7d2a2aa60f1e7f278bb064cb1f90e1339ae095c8f10ed" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.429103 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af162de234e737d20eb7d2a2aa60f1e7f278bb064cb1f90e1339ae095c8f10ed"} err="failed to get container status \"af162de234e737d20eb7d2a2aa60f1e7f278bb064cb1f90e1339ae095c8f10ed\": rpc error: code = NotFound desc = could not find container \"af162de234e737d20eb7d2a2aa60f1e7f278bb064cb1f90e1339ae095c8f10ed\": container with ID starting with af162de234e737d20eb7d2a2aa60f1e7f278bb064cb1f90e1339ae095c8f10ed not found: ID does not exist" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.429118 4685 scope.go:117] "RemoveContainer" containerID="cdd87685d25b5c43e6bc80c99cf0858d416e93cb690dd217b9ef83ed3b064f8e" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.447933 4685 scope.go:117] "RemoveContainer" containerID="6e2abcf92a349a3fc13bd2677b272188f57d3d827097c66147255c6c3394ca04" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.461164 4685 scope.go:117] "RemoveContainer" containerID="cdd87685d25b5c43e6bc80c99cf0858d416e93cb690dd217b9ef83ed3b064f8e" Jan 28 12:48:59 crc kubenswrapper[4685]: E0128 12:48:59.461483 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cdd87685d25b5c43e6bc80c99cf0858d416e93cb690dd217b9ef83ed3b064f8e\": container with ID starting with cdd87685d25b5c43e6bc80c99cf0858d416e93cb690dd217b9ef83ed3b064f8e not found: ID does not exist" containerID="cdd87685d25b5c43e6bc80c99cf0858d416e93cb690dd217b9ef83ed3b064f8e" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.461518 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdd87685d25b5c43e6bc80c99cf0858d416e93cb690dd217b9ef83ed3b064f8e"} err="failed to get container status \"cdd87685d25b5c43e6bc80c99cf0858d416e93cb690dd217b9ef83ed3b064f8e\": rpc error: code = NotFound desc = could not find container \"cdd87685d25b5c43e6bc80c99cf0858d416e93cb690dd217b9ef83ed3b064f8e\": container with ID starting with cdd87685d25b5c43e6bc80c99cf0858d416e93cb690dd217b9ef83ed3b064f8e not found: ID does not exist" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.461538 4685 scope.go:117] "RemoveContainer" containerID="6e2abcf92a349a3fc13bd2677b272188f57d3d827097c66147255c6c3394ca04" Jan 28 12:48:59 crc kubenswrapper[4685]: E0128 12:48:59.461957 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e2abcf92a349a3fc13bd2677b272188f57d3d827097c66147255c6c3394ca04\": container with ID starting with 6e2abcf92a349a3fc13bd2677b272188f57d3d827097c66147255c6c3394ca04 not found: ID does not exist" containerID="6e2abcf92a349a3fc13bd2677b272188f57d3d827097c66147255c6c3394ca04" Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.461984 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e2abcf92a349a3fc13bd2677b272188f57d3d827097c66147255c6c3394ca04"} err="failed to get container status \"6e2abcf92a349a3fc13bd2677b272188f57d3d827097c66147255c6c3394ca04\": 
Jan 28 12:48:59 crc kubenswrapper[4685]: I0128 12:48:59.461984 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e2abcf92a349a3fc13bd2677b272188f57d3d827097c66147255c6c3394ca04"} err="failed to get container status \"6e2abcf92a349a3fc13bd2677b272188f57d3d827097c66147255c6c3394ca04\": rpc error: code = NotFound desc = could not find container \"6e2abcf92a349a3fc13bd2677b272188f57d3d827097c66147255c6c3394ca04\": container with ID starting with 6e2abcf92a349a3fc13bd2677b272188f57d3d827097c66147255c6c3394ca04 not found: ID does not exist"
Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.580648 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ce87fba-cdd7-4110-aac2-14c174e7e46d" path="/var/lib/kubelet/pods/2ce87fba-cdd7-4110-aac2-14c174e7e46d/volumes"
Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.581726 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="352c6743-9f05-432d-a988-54558eeece77" path="/var/lib/kubelet/pods/352c6743-9f05-432d-a988-54558eeece77/volumes"
Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.813462 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-single-0"]
Jan 28 12:49:00 crc kubenswrapper[4685]: E0128 12:49:00.813961 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="352c6743-9f05-432d-a988-54558eeece77" containerName="glance-httpd"
Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.814033 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="352c6743-9f05-432d-a988-54558eeece77" containerName="glance-httpd"
Jan 28 12:49:00 crc kubenswrapper[4685]: E0128 12:49:00.814119 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="352c6743-9f05-432d-a988-54558eeece77" containerName="glance-log"
Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.814231 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="352c6743-9f05-432d-a988-54558eeece77" containerName="glance-log"
Jan 28 12:49:00 crc kubenswrapper[4685]: E0128 12:49:00.814316 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ce87fba-cdd7-4110-aac2-14c174e7e46d" containerName="glance-log"
Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.814376 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ce87fba-cdd7-4110-aac2-14c174e7e46d" containerName="glance-log"
Jan 28 12:49:00 crc kubenswrapper[4685]: E0128 12:49:00.814466 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6" containerName="glance-db-sync"
Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.814532 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6" containerName="glance-db-sync"
Jan 28 12:49:00 crc kubenswrapper[4685]: E0128 12:49:00.814595 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ce87fba-cdd7-4110-aac2-14c174e7e46d" containerName="glance-httpd"
Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.814652 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ce87fba-cdd7-4110-aac2-14c174e7e46d" containerName="glance-httpd"
Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.814828 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ce87fba-cdd7-4110-aac2-14c174e7e46d" containerName="glance-httpd"
Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.814888 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="352c6743-9f05-432d-a988-54558eeece77" containerName="glance-log"
Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.814956 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="352c6743-9f05-432d-a988-54558eeece77" containerName="glance-httpd"
memory_manager.go:354] "RemoveStaleState removing state" podUID="90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6" containerName="glance-db-sync" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.815075 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ce87fba-cdd7-4110-aac2-14c174e7e46d" containerName="glance-log" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.815901 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.819903 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-default-single-config-data" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.819974 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-glance-dockercfg-684br" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.820093 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"combined-ca-bundle" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.824901 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-scripts" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.824995 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"cert-glance-default-public-svc" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.825061 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"cert-glance-default-internal-svc" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.827452 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.887673 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-scripts\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.887730 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-logs\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.887791 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-internal-tls-certs\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.887823 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.887847 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-public-tls-certs\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.887872 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-config-data\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.887892 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-httpd-run\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.888084 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-combined-ca-bundle\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.888149 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8d5s\" (UniqueName: \"kubernetes.io/projected/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-kube-api-access-f8d5s\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.989583 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-public-tls-certs\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.989631 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-config-data\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.989652 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-httpd-run\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.989686 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-combined-ca-bundle\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.989711 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8d5s\" 
(UniqueName: \"kubernetes.io/projected/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-kube-api-access-f8d5s\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.989800 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-scripts\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.989833 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-logs\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.989856 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-internal-tls-certs\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.989979 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.990224 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") device mount path \"/mnt/openstack/pv20\"" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.990603 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-httpd-run\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.990660 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-logs\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.994187 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-scripts\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.994228 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-combined-ca-bundle\") pod \"glance-default-single-0\" (UID: 
\"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.995309 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-public-tls-certs\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:00 crc kubenswrapper[4685]: I0128 12:49:00.996645 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-config-data\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:01 crc kubenswrapper[4685]: I0128 12:49:01.006076 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-internal-tls-certs\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:01 crc kubenswrapper[4685]: I0128 12:49:01.011106 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8d5s\" (UniqueName: \"kubernetes.io/projected/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-kube-api-access-f8d5s\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:01 crc kubenswrapper[4685]: I0128 12:49:01.013756 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"glance-default-single-0\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:01 crc kubenswrapper[4685]: I0128 12:49:01.138162 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:01 crc kubenswrapper[4685]: I0128 12:49:01.402856 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bfldq"] Jan 28 12:49:01 crc kubenswrapper[4685]: I0128 12:49:01.595310 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:49:01 crc kubenswrapper[4685]: I0128 12:49:01.982759 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nfmfw"] Jan 28 12:49:02 crc kubenswrapper[4685]: I0128 12:49:02.392310 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8","Type":"ContainerStarted","Data":"4b9284efe25b9cd911bbeaafd503cb523091549b3965ff508f785ba6c9d45429"} Jan 28 12:49:02 crc kubenswrapper[4685]: I0128 12:49:02.392650 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8","Type":"ContainerStarted","Data":"08e762e9210622fc723798ad64e7629bc0b5eecb2f86a028a4074f8092b80a0c"} Jan 28 12:49:02 crc kubenswrapper[4685]: I0128 12:49:02.392459 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nfmfw" podUID="14ffad50-547e-4c2a-b72c-d6a87b50e746" containerName="registry-server" containerID="cri-o://0d382aab3b6af6cbba9c45f8e909baf5ed3172a3cdca69be205e47ca3f68762c" gracePeriod=2 Jan 28 12:49:02 crc kubenswrapper[4685]: I0128 12:49:02.724859 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nfmfw" Jan 28 12:49:02 crc kubenswrapper[4685]: I0128 12:49:02.847384 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z96zq\" (UniqueName: \"kubernetes.io/projected/14ffad50-547e-4c2a-b72c-d6a87b50e746-kube-api-access-z96zq\") pod \"14ffad50-547e-4c2a-b72c-d6a87b50e746\" (UID: \"14ffad50-547e-4c2a-b72c-d6a87b50e746\") " Jan 28 12:49:02 crc kubenswrapper[4685]: I0128 12:49:02.847517 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14ffad50-547e-4c2a-b72c-d6a87b50e746-utilities\") pod \"14ffad50-547e-4c2a-b72c-d6a87b50e746\" (UID: \"14ffad50-547e-4c2a-b72c-d6a87b50e746\") " Jan 28 12:49:02 crc kubenswrapper[4685]: I0128 12:49:02.847573 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14ffad50-547e-4c2a-b72c-d6a87b50e746-catalog-content\") pod \"14ffad50-547e-4c2a-b72c-d6a87b50e746\" (UID: \"14ffad50-547e-4c2a-b72c-d6a87b50e746\") " Jan 28 12:49:02 crc kubenswrapper[4685]: I0128 12:49:02.848455 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14ffad50-547e-4c2a-b72c-d6a87b50e746-utilities" (OuterVolumeSpecName: "utilities") pod "14ffad50-547e-4c2a-b72c-d6a87b50e746" (UID: "14ffad50-547e-4c2a-b72c-d6a87b50e746"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:49:02 crc kubenswrapper[4685]: I0128 12:49:02.852432 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14ffad50-547e-4c2a-b72c-d6a87b50e746-kube-api-access-z96zq" (OuterVolumeSpecName: "kube-api-access-z96zq") pod "14ffad50-547e-4c2a-b72c-d6a87b50e746" (UID: "14ffad50-547e-4c2a-b72c-d6a87b50e746"). InnerVolumeSpecName "kube-api-access-z96zq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:49:02 crc kubenswrapper[4685]: I0128 12:49:02.949297 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z96zq\" (UniqueName: \"kubernetes.io/projected/14ffad50-547e-4c2a-b72c-d6a87b50e746-kube-api-access-z96zq\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:02 crc kubenswrapper[4685]: I0128 12:49:02.949333 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14ffad50-547e-4c2a-b72c-d6a87b50e746-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:02 crc kubenswrapper[4685]: I0128 12:49:02.979355 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14ffad50-547e-4c2a-b72c-d6a87b50e746-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "14ffad50-547e-4c2a-b72c-d6a87b50e746" (UID: "14ffad50-547e-4c2a-b72c-d6a87b50e746"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:49:03 crc kubenswrapper[4685]: I0128 12:49:03.051231 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14ffad50-547e-4c2a-b72c-d6a87b50e746-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:03 crc kubenswrapper[4685]: I0128 12:49:03.402934 4685 generic.go:334] "Generic (PLEG): container finished" podID="14ffad50-547e-4c2a-b72c-d6a87b50e746" containerID="0d382aab3b6af6cbba9c45f8e909baf5ed3172a3cdca69be205e47ca3f68762c" exitCode=0 Jan 28 12:49:03 crc kubenswrapper[4685]: I0128 12:49:03.402989 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nfmfw" Jan 28 12:49:03 crc kubenswrapper[4685]: I0128 12:49:03.403007 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nfmfw" event={"ID":"14ffad50-547e-4c2a-b72c-d6a87b50e746","Type":"ContainerDied","Data":"0d382aab3b6af6cbba9c45f8e909baf5ed3172a3cdca69be205e47ca3f68762c"} Jan 28 12:49:03 crc kubenswrapper[4685]: I0128 12:49:03.403382 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nfmfw" event={"ID":"14ffad50-547e-4c2a-b72c-d6a87b50e746","Type":"ContainerDied","Data":"ec02327c7bc9e9ac7369a31c424a0988c5836c201f1008989f54889605afcc2c"} Jan 28 12:49:03 crc kubenswrapper[4685]: I0128 12:49:03.403403 4685 scope.go:117] "RemoveContainer" containerID="0d382aab3b6af6cbba9c45f8e909baf5ed3172a3cdca69be205e47ca3f68762c" Jan 28 12:49:03 crc kubenswrapper[4685]: I0128 12:49:03.405277 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8","Type":"ContainerStarted","Data":"541cf062bc96e13321f3708f0361a2d1e0615b6a71dc747af29ffecfd9f16129"} Jan 28 12:49:03 crc kubenswrapper[4685]: I0128 12:49:03.423983 4685 scope.go:117] "RemoveContainer" containerID="dd232d9c8bc91d905471314993d443a669ece4e193f3ec0e768fa99f513f2e63" Jan 28 12:49:03 crc kubenswrapper[4685]: I0128 12:49:03.453879 4685 scope.go:117] "RemoveContainer" containerID="d25741f9f1c1cd28f65ff28e196a93874c328f3f2fd177c5212e710588f2dc3a" Jan 28 12:49:03 crc kubenswrapper[4685]: I0128 12:49:03.470098 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-single-0" podStartSLOduration=3.470080787 podStartE2EDuration="3.470080787s" podCreationTimestamp="2026-01-28 12:49:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:03.4535601 +0000 UTC m=+1694.540973945" watchObservedRunningTime="2026-01-28 12:49:03.470080787 +0000 UTC m=+1694.557494632" Jan 28 12:49:03 crc kubenswrapper[4685]: I0128 12:49:03.475811 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nfmfw"] Jan 28 12:49:03 crc kubenswrapper[4685]: I0128 12:49:03.480100 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nfmfw"] Jan 28 12:49:03 crc kubenswrapper[4685]: I0128 12:49:03.486257 4685 scope.go:117] "RemoveContainer" containerID="0d382aab3b6af6cbba9c45f8e909baf5ed3172a3cdca69be205e47ca3f68762c" Jan 28 12:49:03 crc kubenswrapper[4685]: E0128 12:49:03.486841 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d382aab3b6af6cbba9c45f8e909baf5ed3172a3cdca69be205e47ca3f68762c\": container with ID starting with 0d382aab3b6af6cbba9c45f8e909baf5ed3172a3cdca69be205e47ca3f68762c not found: ID does not exist" containerID="0d382aab3b6af6cbba9c45f8e909baf5ed3172a3cdca69be205e47ca3f68762c" Jan 28 12:49:03 crc kubenswrapper[4685]: I0128 12:49:03.486892 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d382aab3b6af6cbba9c45f8e909baf5ed3172a3cdca69be205e47ca3f68762c"} err="failed to get container status \"0d382aab3b6af6cbba9c45f8e909baf5ed3172a3cdca69be205e47ca3f68762c\": rpc error: code = NotFound desc = could not find container 
\"0d382aab3b6af6cbba9c45f8e909baf5ed3172a3cdca69be205e47ca3f68762c\": container with ID starting with 0d382aab3b6af6cbba9c45f8e909baf5ed3172a3cdca69be205e47ca3f68762c not found: ID does not exist" Jan 28 12:49:03 crc kubenswrapper[4685]: I0128 12:49:03.486926 4685 scope.go:117] "RemoveContainer" containerID="dd232d9c8bc91d905471314993d443a669ece4e193f3ec0e768fa99f513f2e63" Jan 28 12:49:03 crc kubenswrapper[4685]: E0128 12:49:03.487641 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd232d9c8bc91d905471314993d443a669ece4e193f3ec0e768fa99f513f2e63\": container with ID starting with dd232d9c8bc91d905471314993d443a669ece4e193f3ec0e768fa99f513f2e63 not found: ID does not exist" containerID="dd232d9c8bc91d905471314993d443a669ece4e193f3ec0e768fa99f513f2e63" Jan 28 12:49:03 crc kubenswrapper[4685]: I0128 12:49:03.487665 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd232d9c8bc91d905471314993d443a669ece4e193f3ec0e768fa99f513f2e63"} err="failed to get container status \"dd232d9c8bc91d905471314993d443a669ece4e193f3ec0e768fa99f513f2e63\": rpc error: code = NotFound desc = could not find container \"dd232d9c8bc91d905471314993d443a669ece4e193f3ec0e768fa99f513f2e63\": container with ID starting with dd232d9c8bc91d905471314993d443a669ece4e193f3ec0e768fa99f513f2e63 not found: ID does not exist" Jan 28 12:49:03 crc kubenswrapper[4685]: I0128 12:49:03.487683 4685 scope.go:117] "RemoveContainer" containerID="d25741f9f1c1cd28f65ff28e196a93874c328f3f2fd177c5212e710588f2dc3a" Jan 28 12:49:03 crc kubenswrapper[4685]: E0128 12:49:03.488220 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d25741f9f1c1cd28f65ff28e196a93874c328f3f2fd177c5212e710588f2dc3a\": container with ID starting with d25741f9f1c1cd28f65ff28e196a93874c328f3f2fd177c5212e710588f2dc3a not found: ID does not exist" containerID="d25741f9f1c1cd28f65ff28e196a93874c328f3f2fd177c5212e710588f2dc3a" Jan 28 12:49:03 crc kubenswrapper[4685]: I0128 12:49:03.488253 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d25741f9f1c1cd28f65ff28e196a93874c328f3f2fd177c5212e710588f2dc3a"} err="failed to get container status \"d25741f9f1c1cd28f65ff28e196a93874c328f3f2fd177c5212e710588f2dc3a\": rpc error: code = NotFound desc = could not find container \"d25741f9f1c1cd28f65ff28e196a93874c328f3f2fd177c5212e710588f2dc3a\": container with ID starting with d25741f9f1c1cd28f65ff28e196a93874c328f3f2fd177c5212e710588f2dc3a not found: ID does not exist" Jan 28 12:49:04 crc kubenswrapper[4685]: I0128 12:49:04.555400 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14ffad50-547e-4c2a-b72c-d6a87b50e746" path="/var/lib/kubelet/pods/14ffad50-547e-4c2a-b72c-d6a87b50e746/volumes" Jan 28 12:49:11 crc kubenswrapper[4685]: I0128 12:49:11.138946 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:11 crc kubenswrapper[4685]: I0128 12:49:11.139488 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:11 crc kubenswrapper[4685]: I0128 12:49:11.161027 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:11 crc kubenswrapper[4685]: I0128 12:49:11.182321 4685 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:11 crc kubenswrapper[4685]: I0128 12:49:11.473875 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:11 crc kubenswrapper[4685]: I0128 12:49:11.473920 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:11 crc kubenswrapper[4685]: I0128 12:49:11.546204 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:49:11 crc kubenswrapper[4685]: E0128 12:49:11.546730 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:49:13 crc kubenswrapper[4685]: I0128 12:49:13.586608 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:13 crc kubenswrapper[4685]: I0128 12:49:13.587052 4685 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:49:13 crc kubenswrapper[4685]: I0128 12:49:13.588156 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:14 crc kubenswrapper[4685]: I0128 12:49:14.746913 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-db-sync-fqrjs"] Jan 28 12:49:14 crc kubenswrapper[4685]: I0128 12:49:14.753482 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-db-sync-fqrjs"] Jan 28 12:49:14 crc kubenswrapper[4685]: I0128 12:49:14.798916 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glancecf6a-account-delete-zpnkd"] Jan 28 12:49:14 crc kubenswrapper[4685]: E0128 12:49:14.799307 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14ffad50-547e-4c2a-b72c-d6a87b50e746" containerName="registry-server" Jan 28 12:49:14 crc kubenswrapper[4685]: I0128 12:49:14.799326 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="14ffad50-547e-4c2a-b72c-d6a87b50e746" containerName="registry-server" Jan 28 12:49:14 crc kubenswrapper[4685]: E0128 12:49:14.799364 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14ffad50-547e-4c2a-b72c-d6a87b50e746" containerName="extract-content" Jan 28 12:49:14 crc kubenswrapper[4685]: I0128 12:49:14.799373 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="14ffad50-547e-4c2a-b72c-d6a87b50e746" containerName="extract-content" Jan 28 12:49:14 crc kubenswrapper[4685]: E0128 12:49:14.799392 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14ffad50-547e-4c2a-b72c-d6a87b50e746" containerName="extract-utilities" Jan 28 12:49:14 crc kubenswrapper[4685]: I0128 12:49:14.799404 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="14ffad50-547e-4c2a-b72c-d6a87b50e746" containerName="extract-utilities" Jan 28 12:49:14 crc kubenswrapper[4685]: I0128 12:49:14.799552 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="14ffad50-547e-4c2a-b72c-d6a87b50e746" 
containerName="registry-server" Jan 28 12:49:14 crc kubenswrapper[4685]: I0128 12:49:14.800149 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glancecf6a-account-delete-zpnkd" Jan 28 12:49:14 crc kubenswrapper[4685]: I0128 12:49:14.828804 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glancecf6a-account-delete-zpnkd"] Jan 28 12:49:14 crc kubenswrapper[4685]: I0128 12:49:14.852693 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:49:14 crc kubenswrapper[4685]: I0128 12:49:14.915986 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e194133-c3d1-4575-9cba-e44a2597fccb-operator-scripts\") pod \"glancecf6a-account-delete-zpnkd\" (UID: \"1e194133-c3d1-4575-9cba-e44a2597fccb\") " pod="glance-kuttl-tests/glancecf6a-account-delete-zpnkd" Jan 28 12:49:14 crc kubenswrapper[4685]: I0128 12:49:14.916106 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pg8kl\" (UniqueName: \"kubernetes.io/projected/1e194133-c3d1-4575-9cba-e44a2597fccb-kube-api-access-pg8kl\") pod \"glancecf6a-account-delete-zpnkd\" (UID: \"1e194133-c3d1-4575-9cba-e44a2597fccb\") " pod="glance-kuttl-tests/glancecf6a-account-delete-zpnkd" Jan 28 12:49:15 crc kubenswrapper[4685]: I0128 12:49:15.017478 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e194133-c3d1-4575-9cba-e44a2597fccb-operator-scripts\") pod \"glancecf6a-account-delete-zpnkd\" (UID: \"1e194133-c3d1-4575-9cba-e44a2597fccb\") " pod="glance-kuttl-tests/glancecf6a-account-delete-zpnkd" Jan 28 12:49:15 crc kubenswrapper[4685]: I0128 12:49:15.017555 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pg8kl\" (UniqueName: \"kubernetes.io/projected/1e194133-c3d1-4575-9cba-e44a2597fccb-kube-api-access-pg8kl\") pod \"glancecf6a-account-delete-zpnkd\" (UID: \"1e194133-c3d1-4575-9cba-e44a2597fccb\") " pod="glance-kuttl-tests/glancecf6a-account-delete-zpnkd" Jan 28 12:49:15 crc kubenswrapper[4685]: I0128 12:49:15.018404 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e194133-c3d1-4575-9cba-e44a2597fccb-operator-scripts\") pod \"glancecf6a-account-delete-zpnkd\" (UID: \"1e194133-c3d1-4575-9cba-e44a2597fccb\") " pod="glance-kuttl-tests/glancecf6a-account-delete-zpnkd" Jan 28 12:49:15 crc kubenswrapper[4685]: I0128 12:49:15.037183 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pg8kl\" (UniqueName: \"kubernetes.io/projected/1e194133-c3d1-4575-9cba-e44a2597fccb-kube-api-access-pg8kl\") pod \"glancecf6a-account-delete-zpnkd\" (UID: \"1e194133-c3d1-4575-9cba-e44a2597fccb\") " pod="glance-kuttl-tests/glancecf6a-account-delete-zpnkd" Jan 28 12:49:15 crc kubenswrapper[4685]: I0128 12:49:15.127792 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glancecf6a-account-delete-zpnkd" Jan 28 12:49:15 crc kubenswrapper[4685]: I0128 12:49:15.457372 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glancecf6a-account-delete-zpnkd"] Jan 28 12:49:15 crc kubenswrapper[4685]: I0128 12:49:15.502567 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glancecf6a-account-delete-zpnkd" event={"ID":"1e194133-c3d1-4575-9cba-e44a2597fccb","Type":"ContainerStarted","Data":"b4fe3f1156dbd6cd7df46bdd7bdb310e36f7e07f58da63f5c72c790f716a23b4"} Jan 28 12:49:15 crc kubenswrapper[4685]: I0128 12:49:15.502755 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-0" podUID="a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" containerName="glance-log" containerID="cri-o://4b9284efe25b9cd911bbeaafd503cb523091549b3965ff508f785ba6c9d45429" gracePeriod=30 Jan 28 12:49:15 crc kubenswrapper[4685]: I0128 12:49:15.503146 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-0" podUID="a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" containerName="glance-httpd" containerID="cri-o://541cf062bc96e13321f3708f0361a2d1e0615b6a71dc747af29ffecfd9f16129" gracePeriod=30 Jan 28 12:49:15 crc kubenswrapper[4685]: I0128 12:49:15.508810 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="glance-kuttl-tests/glance-default-single-0" podUID="a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.106:9292/healthcheck\": EOF" Jan 28 12:49:16 crc kubenswrapper[4685]: I0128 12:49:16.511054 4685 generic.go:334] "Generic (PLEG): container finished" podID="1e194133-c3d1-4575-9cba-e44a2597fccb" containerID="63e3b78192d6728588ca054b56757e722ca0e7419547cd935613cad3ff248eaf" exitCode=0 Jan 28 12:49:16 crc kubenswrapper[4685]: I0128 12:49:16.511110 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glancecf6a-account-delete-zpnkd" event={"ID":"1e194133-c3d1-4575-9cba-e44a2597fccb","Type":"ContainerDied","Data":"63e3b78192d6728588ca054b56757e722ca0e7419547cd935613cad3ff248eaf"} Jan 28 12:49:16 crc kubenswrapper[4685]: I0128 12:49:16.513635 4685 generic.go:334] "Generic (PLEG): container finished" podID="a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" containerID="4b9284efe25b9cd911bbeaafd503cb523091549b3965ff508f785ba6c9d45429" exitCode=143 Jan 28 12:49:16 crc kubenswrapper[4685]: I0128 12:49:16.513746 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8","Type":"ContainerDied","Data":"4b9284efe25b9cd911bbeaafd503cb523091549b3965ff508f785ba6c9d45429"} Jan 28 12:49:16 crc kubenswrapper[4685]: I0128 12:49:16.553562 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6" path="/var/lib/kubelet/pods/90eaeb2a-40dc-4636-a6d1-1da29fe0b4b6/volumes" Jan 28 12:49:17 crc kubenswrapper[4685]: I0128 12:49:17.812966 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glancecf6a-account-delete-zpnkd" Jan 28 12:49:17 crc kubenswrapper[4685]: I0128 12:49:17.980421 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pg8kl\" (UniqueName: \"kubernetes.io/projected/1e194133-c3d1-4575-9cba-e44a2597fccb-kube-api-access-pg8kl\") pod \"1e194133-c3d1-4575-9cba-e44a2597fccb\" (UID: \"1e194133-c3d1-4575-9cba-e44a2597fccb\") " Jan 28 12:49:17 crc kubenswrapper[4685]: I0128 12:49:17.980500 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e194133-c3d1-4575-9cba-e44a2597fccb-operator-scripts\") pod \"1e194133-c3d1-4575-9cba-e44a2597fccb\" (UID: \"1e194133-c3d1-4575-9cba-e44a2597fccb\") " Jan 28 12:49:17 crc kubenswrapper[4685]: I0128 12:49:17.981265 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e194133-c3d1-4575-9cba-e44a2597fccb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1e194133-c3d1-4575-9cba-e44a2597fccb" (UID: "1e194133-c3d1-4575-9cba-e44a2597fccb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:49:17 crc kubenswrapper[4685]: I0128 12:49:17.987057 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e194133-c3d1-4575-9cba-e44a2597fccb-kube-api-access-pg8kl" (OuterVolumeSpecName: "kube-api-access-pg8kl") pod "1e194133-c3d1-4575-9cba-e44a2597fccb" (UID: "1e194133-c3d1-4575-9cba-e44a2597fccb"). InnerVolumeSpecName "kube-api-access-pg8kl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:49:18 crc kubenswrapper[4685]: I0128 12:49:18.082044 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pg8kl\" (UniqueName: \"kubernetes.io/projected/1e194133-c3d1-4575-9cba-e44a2597fccb-kube-api-access-pg8kl\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:18 crc kubenswrapper[4685]: I0128 12:49:18.082085 4685 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e194133-c3d1-4575-9cba-e44a2597fccb-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:18 crc kubenswrapper[4685]: I0128 12:49:18.532406 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glancecf6a-account-delete-zpnkd" event={"ID":"1e194133-c3d1-4575-9cba-e44a2597fccb","Type":"ContainerDied","Data":"b4fe3f1156dbd6cd7df46bdd7bdb310e36f7e07f58da63f5c72c790f716a23b4"} Jan 28 12:49:18 crc kubenswrapper[4685]: I0128 12:49:18.532617 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b4fe3f1156dbd6cd7df46bdd7bdb310e36f7e07f58da63f5c72c790f716a23b4" Jan 28 12:49:18 crc kubenswrapper[4685]: I0128 12:49:18.532519 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glancecf6a-account-delete-zpnkd" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.196940 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.399502 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-httpd-run\") pod \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.399836 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.399904 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8d5s\" (UniqueName: \"kubernetes.io/projected/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-kube-api-access-f8d5s\") pod \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.399939 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-config-data\") pod \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.399965 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-public-tls-certs\") pod \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.400012 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-scripts\") pod \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.400487 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" (UID: "a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.400704 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-logs\") pod \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.400743 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-combined-ca-bundle\") pod \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.400812 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-internal-tls-certs\") pod \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\" (UID: \"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8\") " Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.401118 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.401137 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-logs" (OuterVolumeSpecName: "logs") pod "a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" (UID: "a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.405452 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-kube-api-access-f8d5s" (OuterVolumeSpecName: "kube-api-access-f8d5s") pod "a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" (UID: "a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8"). InnerVolumeSpecName "kube-api-access-f8d5s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.406511 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage20-crc" (OuterVolumeSpecName: "glance") pod "a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" (UID: "a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8"). InnerVolumeSpecName "local-storage20-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.416273 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-scripts" (OuterVolumeSpecName: "scripts") pod "a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" (UID: "a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.424614 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" (UID: "a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.441305 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" (UID: "a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.441678 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" (UID: "a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.444987 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-config-data" (OuterVolumeSpecName: "config-data") pod "a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" (UID: "a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.502813 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8d5s\" (UniqueName: \"kubernetes.io/projected/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-kube-api-access-f8d5s\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.502882 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.502893 4685 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.502901 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.502932 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.502941 4685 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.502950 4685 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.502979 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") on node \"crc\" " Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.515344 4685 
operation_generator.go:917] UnmountDevice succeeded for volume "local-storage20-crc" (UniqueName: "kubernetes.io/local-volume/local-storage20-crc") on node "crc" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.548593 4685 generic.go:334] "Generic (PLEG): container finished" podID="a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" containerID="541cf062bc96e13321f3708f0361a2d1e0615b6a71dc747af29ffecfd9f16129" exitCode=0 Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.548644 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8","Type":"ContainerDied","Data":"541cf062bc96e13321f3708f0361a2d1e0615b6a71dc747af29ffecfd9f16129"} Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.548673 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8","Type":"ContainerDied","Data":"08e762e9210622fc723798ad64e7629bc0b5eecb2f86a028a4074f8092b80a0c"} Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.548677 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.548690 4685 scope.go:117] "RemoveContainer" containerID="541cf062bc96e13321f3708f0361a2d1e0615b6a71dc747af29ffecfd9f16129" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.573653 4685 scope.go:117] "RemoveContainer" containerID="4b9284efe25b9cd911bbeaafd503cb523091549b3965ff508f785ba6c9d45429" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.583428 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.590941 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.598499 4685 scope.go:117] "RemoveContainer" containerID="541cf062bc96e13321f3708f0361a2d1e0615b6a71dc747af29ffecfd9f16129" Jan 28 12:49:19 crc kubenswrapper[4685]: E0128 12:49:19.598911 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"541cf062bc96e13321f3708f0361a2d1e0615b6a71dc747af29ffecfd9f16129\": container with ID starting with 541cf062bc96e13321f3708f0361a2d1e0615b6a71dc747af29ffecfd9f16129 not found: ID does not exist" containerID="541cf062bc96e13321f3708f0361a2d1e0615b6a71dc747af29ffecfd9f16129" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.598957 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"541cf062bc96e13321f3708f0361a2d1e0615b6a71dc747af29ffecfd9f16129"} err="failed to get container status \"541cf062bc96e13321f3708f0361a2d1e0615b6a71dc747af29ffecfd9f16129\": rpc error: code = NotFound desc = could not find container \"541cf062bc96e13321f3708f0361a2d1e0615b6a71dc747af29ffecfd9f16129\": container with ID starting with 541cf062bc96e13321f3708f0361a2d1e0615b6a71dc747af29ffecfd9f16129 not found: ID does not exist" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.598988 4685 scope.go:117] "RemoveContainer" containerID="4b9284efe25b9cd911bbeaafd503cb523091549b3965ff508f785ba6c9d45429" Jan 28 12:49:19 crc kubenswrapper[4685]: E0128 12:49:19.599344 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"4b9284efe25b9cd911bbeaafd503cb523091549b3965ff508f785ba6c9d45429\": container with ID starting with 4b9284efe25b9cd911bbeaafd503cb523091549b3965ff508f785ba6c9d45429 not found: ID does not exist" containerID="4b9284efe25b9cd911bbeaafd503cb523091549b3965ff508f785ba6c9d45429" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.599360 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b9284efe25b9cd911bbeaafd503cb523091549b3965ff508f785ba6c9d45429"} err="failed to get container status \"4b9284efe25b9cd911bbeaafd503cb523091549b3965ff508f785ba6c9d45429\": rpc error: code = NotFound desc = could not find container \"4b9284efe25b9cd911bbeaafd503cb523091549b3965ff508f785ba6c9d45429\": container with ID starting with 4b9284efe25b9cd911bbeaafd503cb523091549b3965ff508f785ba6c9d45429 not found: ID does not exist" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.604429 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.844518 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-db-create-bd7fb"] Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.850228 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-db-create-bd7fb"] Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.859457 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-cf6a-account-create-update-hl5sf"] Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.864759 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glancecf6a-account-delete-zpnkd"] Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.870299 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-cf6a-account-create-update-hl5sf"] Jan 28 12:49:19 crc kubenswrapper[4685]: I0128 12:49:19.879583 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glancecf6a-account-delete-zpnkd"] Jan 28 12:49:20 crc kubenswrapper[4685]: I0128 12:49:20.557663 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06252eec-03e7-48a6-ba7b-567b233e3ee1" path="/var/lib/kubelet/pods/06252eec-03e7-48a6-ba7b-567b233e3ee1/volumes" Jan 28 12:49:20 crc kubenswrapper[4685]: I0128 12:49:20.558525 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae1181-ad73-4a92-8676-df118044b818" path="/var/lib/kubelet/pods/09ae1181-ad73-4a92-8676-df118044b818/volumes" Jan 28 12:49:20 crc kubenswrapper[4685]: I0128 12:49:20.560047 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e194133-c3d1-4575-9cba-e44a2597fccb" path="/var/lib/kubelet/pods/1e194133-c3d1-4575-9cba-e44a2597fccb/volumes" Jan 28 12:49:20 crc kubenswrapper[4685]: I0128 12:49:20.562217 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" path="/var/lib/kubelet/pods/a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8/volumes" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.233067 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-db-create-6dg77"] Jan 28 12:49:21 crc kubenswrapper[4685]: E0128 12:49:21.233435 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e194133-c3d1-4575-9cba-e44a2597fccb" containerName="mariadb-account-delete" Jan 28 
12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.233454 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e194133-c3d1-4575-9cba-e44a2597fccb" containerName="mariadb-account-delete" Jan 28 12:49:21 crc kubenswrapper[4685]: E0128 12:49:21.233481 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" containerName="glance-log" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.233489 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" containerName="glance-log" Jan 28 12:49:21 crc kubenswrapper[4685]: E0128 12:49:21.233521 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" containerName="glance-httpd" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.233529 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" containerName="glance-httpd" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.233678 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" containerName="glance-log" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.233699 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="a35e04ae-60f3-41d9-95d4-92b4ffa7f9f8" containerName="glance-httpd" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.233714 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e194133-c3d1-4575-9cba-e44a2597fccb" containerName="mariadb-account-delete" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.234361 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-6dg77" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.247928 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-74b0-account-create-update-n2xpz"] Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.248829 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-74b0-account-create-update-n2xpz" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.251712 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-db-secret" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.276742 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-create-6dg77"] Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.282916 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-74b0-account-create-update-n2xpz"] Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.326517 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29pxw\" (UniqueName: \"kubernetes.io/projected/f2e82a53-2154-464c-b908-01c0a787d309-kube-api-access-29pxw\") pod \"glance-db-create-6dg77\" (UID: \"f2e82a53-2154-464c-b908-01c0a787d309\") " pod="glance-kuttl-tests/glance-db-create-6dg77" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.326594 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2e82a53-2154-464c-b908-01c0a787d309-operator-scripts\") pod \"glance-db-create-6dg77\" (UID: \"f2e82a53-2154-464c-b908-01c0a787d309\") " pod="glance-kuttl-tests/glance-db-create-6dg77" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.326628 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e-operator-scripts\") pod \"glance-74b0-account-create-update-n2xpz\" (UID: \"2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e\") " pod="glance-kuttl-tests/glance-74b0-account-create-update-n2xpz" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.326865 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vspk9\" (UniqueName: \"kubernetes.io/projected/2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e-kube-api-access-vspk9\") pod \"glance-74b0-account-create-update-n2xpz\" (UID: \"2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e\") " pod="glance-kuttl-tests/glance-74b0-account-create-update-n2xpz" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.427410 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vspk9\" (UniqueName: \"kubernetes.io/projected/2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e-kube-api-access-vspk9\") pod \"glance-74b0-account-create-update-n2xpz\" (UID: \"2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e\") " pod="glance-kuttl-tests/glance-74b0-account-create-update-n2xpz" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.427482 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29pxw\" (UniqueName: \"kubernetes.io/projected/f2e82a53-2154-464c-b908-01c0a787d309-kube-api-access-29pxw\") pod \"glance-db-create-6dg77\" (UID: \"f2e82a53-2154-464c-b908-01c0a787d309\") " pod="glance-kuttl-tests/glance-db-create-6dg77" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.427515 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2e82a53-2154-464c-b908-01c0a787d309-operator-scripts\") pod \"glance-db-create-6dg77\" (UID: \"f2e82a53-2154-464c-b908-01c0a787d309\") " 
pod="glance-kuttl-tests/glance-db-create-6dg77" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.427538 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e-operator-scripts\") pod \"glance-74b0-account-create-update-n2xpz\" (UID: \"2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e\") " pod="glance-kuttl-tests/glance-74b0-account-create-update-n2xpz" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.428183 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e-operator-scripts\") pod \"glance-74b0-account-create-update-n2xpz\" (UID: \"2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e\") " pod="glance-kuttl-tests/glance-74b0-account-create-update-n2xpz" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.428979 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2e82a53-2154-464c-b908-01c0a787d309-operator-scripts\") pod \"glance-db-create-6dg77\" (UID: \"f2e82a53-2154-464c-b908-01c0a787d309\") " pod="glance-kuttl-tests/glance-db-create-6dg77" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.450998 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vspk9\" (UniqueName: \"kubernetes.io/projected/2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e-kube-api-access-vspk9\") pod \"glance-74b0-account-create-update-n2xpz\" (UID: \"2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e\") " pod="glance-kuttl-tests/glance-74b0-account-create-update-n2xpz" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.456861 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29pxw\" (UniqueName: \"kubernetes.io/projected/f2e82a53-2154-464c-b908-01c0a787d309-kube-api-access-29pxw\") pod \"glance-db-create-6dg77\" (UID: \"f2e82a53-2154-464c-b908-01c0a787d309\") " pod="glance-kuttl-tests/glance-db-create-6dg77" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.558131 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-6dg77" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.577843 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-74b0-account-create-update-n2xpz" Jan 28 12:49:21 crc kubenswrapper[4685]: I0128 12:49:21.979519 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-create-6dg77"] Jan 28 12:49:22 crc kubenswrapper[4685]: I0128 12:49:22.038713 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-74b0-account-create-update-n2xpz"] Jan 28 12:49:22 crc kubenswrapper[4685]: W0128 12:49:22.044393 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2f93fbd2_b7fc_4aa8_a98a_eff0e2ef0e7e.slice/crio-cf24e2ff88c6b0b937de1df071bc3a493b310e7195fffaf6cb86e43078add688 WatchSource:0}: Error finding container cf24e2ff88c6b0b937de1df071bc3a493b310e7195fffaf6cb86e43078add688: Status 404 returned error can't find the container with id cf24e2ff88c6b0b937de1df071bc3a493b310e7195fffaf6cb86e43078add688 Jan 28 12:49:22 crc kubenswrapper[4685]: I0128 12:49:22.575791 4685 generic.go:334] "Generic (PLEG): container finished" podID="2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e" containerID="ab0b30dc02396b83bf1f61c9d41a3aff0baf1a6e769900e5f4c55da7e2bf2004" exitCode=0 Jan 28 12:49:22 crc kubenswrapper[4685]: I0128 12:49:22.575884 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-74b0-account-create-update-n2xpz" event={"ID":"2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e","Type":"ContainerDied","Data":"ab0b30dc02396b83bf1f61c9d41a3aff0baf1a6e769900e5f4c55da7e2bf2004"} Jan 28 12:49:22 crc kubenswrapper[4685]: I0128 12:49:22.576393 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-74b0-account-create-update-n2xpz" event={"ID":"2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e","Type":"ContainerStarted","Data":"cf24e2ff88c6b0b937de1df071bc3a493b310e7195fffaf6cb86e43078add688"} Jan 28 12:49:22 crc kubenswrapper[4685]: I0128 12:49:22.578963 4685 generic.go:334] "Generic (PLEG): container finished" podID="f2e82a53-2154-464c-b908-01c0a787d309" containerID="edd16fad9dc515cf2967026d77e8e3e6a14761b4d5266c917c303dc2c1cc7874" exitCode=0 Jan 28 12:49:22 crc kubenswrapper[4685]: I0128 12:49:22.578992 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-6dg77" event={"ID":"f2e82a53-2154-464c-b908-01c0a787d309","Type":"ContainerDied","Data":"edd16fad9dc515cf2967026d77e8e3e6a14761b4d5266c917c303dc2c1cc7874"} Jan 28 12:49:22 crc kubenswrapper[4685]: I0128 12:49:22.579026 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-6dg77" event={"ID":"f2e82a53-2154-464c-b908-01c0a787d309","Type":"ContainerStarted","Data":"8d8281661683468dd2ab90eafa673f4ea066a411e545b0f638f54c99b434f788"} Jan 28 12:49:23 crc kubenswrapper[4685]: I0128 12:49:23.956988 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-6dg77" Jan 28 12:49:23 crc kubenswrapper[4685]: I0128 12:49:23.962110 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-74b0-account-create-update-n2xpz" Jan 28 12:49:24 crc kubenswrapper[4685]: I0128 12:49:24.059213 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2e82a53-2154-464c-b908-01c0a787d309-operator-scripts\") pod \"f2e82a53-2154-464c-b908-01c0a787d309\" (UID: \"f2e82a53-2154-464c-b908-01c0a787d309\") " Jan 28 12:49:24 crc kubenswrapper[4685]: I0128 12:49:24.059436 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-29pxw\" (UniqueName: \"kubernetes.io/projected/f2e82a53-2154-464c-b908-01c0a787d309-kube-api-access-29pxw\") pod \"f2e82a53-2154-464c-b908-01c0a787d309\" (UID: \"f2e82a53-2154-464c-b908-01c0a787d309\") " Jan 28 12:49:24 crc kubenswrapper[4685]: I0128 12:49:24.059692 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2e82a53-2154-464c-b908-01c0a787d309-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f2e82a53-2154-464c-b908-01c0a787d309" (UID: "f2e82a53-2154-464c-b908-01c0a787d309"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:49:24 crc kubenswrapper[4685]: I0128 12:49:24.059817 4685 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2e82a53-2154-464c-b908-01c0a787d309-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:24 crc kubenswrapper[4685]: I0128 12:49:24.065353 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2e82a53-2154-464c-b908-01c0a787d309-kube-api-access-29pxw" (OuterVolumeSpecName: "kube-api-access-29pxw") pod "f2e82a53-2154-464c-b908-01c0a787d309" (UID: "f2e82a53-2154-464c-b908-01c0a787d309"). InnerVolumeSpecName "kube-api-access-29pxw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:49:24 crc kubenswrapper[4685]: I0128 12:49:24.161270 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vspk9\" (UniqueName: \"kubernetes.io/projected/2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e-kube-api-access-vspk9\") pod \"2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e\" (UID: \"2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e\") " Jan 28 12:49:24 crc kubenswrapper[4685]: I0128 12:49:24.161405 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e-operator-scripts\") pod \"2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e\" (UID: \"2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e\") " Jan 28 12:49:24 crc kubenswrapper[4685]: I0128 12:49:24.161989 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e" (UID: "2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:49:24 crc kubenswrapper[4685]: I0128 12:49:24.162047 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-29pxw\" (UniqueName: \"kubernetes.io/projected/f2e82a53-2154-464c-b908-01c0a787d309-kube-api-access-29pxw\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:24 crc kubenswrapper[4685]: I0128 12:49:24.164607 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e-kube-api-access-vspk9" (OuterVolumeSpecName: "kube-api-access-vspk9") pod "2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e" (UID: "2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e"). InnerVolumeSpecName "kube-api-access-vspk9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:49:24 crc kubenswrapper[4685]: I0128 12:49:24.263277 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vspk9\" (UniqueName: \"kubernetes.io/projected/2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e-kube-api-access-vspk9\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:24 crc kubenswrapper[4685]: I0128 12:49:24.263315 4685 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:24 crc kubenswrapper[4685]: I0128 12:49:24.594745 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-6dg77" Jan 28 12:49:24 crc kubenswrapper[4685]: I0128 12:49:24.594739 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-6dg77" event={"ID":"f2e82a53-2154-464c-b908-01c0a787d309","Type":"ContainerDied","Data":"8d8281661683468dd2ab90eafa673f4ea066a411e545b0f638f54c99b434f788"} Jan 28 12:49:24 crc kubenswrapper[4685]: I0128 12:49:24.594887 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8d8281661683468dd2ab90eafa673f4ea066a411e545b0f638f54c99b434f788" Jan 28 12:49:24 crc kubenswrapper[4685]: I0128 12:49:24.595997 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-74b0-account-create-update-n2xpz" event={"ID":"2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e","Type":"ContainerDied","Data":"cf24e2ff88c6b0b937de1df071bc3a493b310e7195fffaf6cb86e43078add688"} Jan 28 12:49:24 crc kubenswrapper[4685]: I0128 12:49:24.596037 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cf24e2ff88c6b0b937de1df071bc3a493b310e7195fffaf6cb86e43078add688" Jan 28 12:49:24 crc kubenswrapper[4685]: I0128 12:49:24.596049 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-74b0-account-create-update-n2xpz" Jan 28 12:49:26 crc kubenswrapper[4685]: I0128 12:49:26.535292 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-db-sync-vdwfv"] Jan 28 12:49:26 crc kubenswrapper[4685]: E0128 12:49:26.535931 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e" containerName="mariadb-account-create-update" Jan 28 12:49:26 crc kubenswrapper[4685]: I0128 12:49:26.535945 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e" containerName="mariadb-account-create-update" Jan 28 12:49:26 crc kubenswrapper[4685]: E0128 12:49:26.535975 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2e82a53-2154-464c-b908-01c0a787d309" containerName="mariadb-database-create" Jan 28 12:49:26 crc kubenswrapper[4685]: I0128 12:49:26.535982 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2e82a53-2154-464c-b908-01c0a787d309" containerName="mariadb-database-create" Jan 28 12:49:26 crc kubenswrapper[4685]: I0128 12:49:26.536113 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e" containerName="mariadb-account-create-update" Jan 28 12:49:26 crc kubenswrapper[4685]: I0128 12:49:26.536145 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2e82a53-2154-464c-b908-01c0a787d309" containerName="mariadb-database-create" Jan 28 12:49:26 crc kubenswrapper[4685]: I0128 12:49:26.536686 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-vdwfv" Jan 28 12:49:26 crc kubenswrapper[4685]: I0128 12:49:26.544724 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-config-data" Jan 28 12:49:26 crc kubenswrapper[4685]: I0128 12:49:26.544999 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-glance-dockercfg-gx244" Jan 28 12:49:26 crc kubenswrapper[4685]: I0128 12:49:26.545859 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:49:26 crc kubenswrapper[4685]: E0128 12:49:26.546074 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:49:26 crc kubenswrapper[4685]: I0128 12:49:26.555790 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-sync-vdwfv"] Jan 28 12:49:26 crc kubenswrapper[4685]: I0128 12:49:26.596816 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6dwp\" (UniqueName: \"kubernetes.io/projected/34f1ac52-5781-4029-9ba6-5bc9f7d7a193-kube-api-access-s6dwp\") pod \"glance-db-sync-vdwfv\" (UID: \"34f1ac52-5781-4029-9ba6-5bc9f7d7a193\") " pod="glance-kuttl-tests/glance-db-sync-vdwfv" Jan 28 12:49:26 crc kubenswrapper[4685]: I0128 12:49:26.597134 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/34f1ac52-5781-4029-9ba6-5bc9f7d7a193-config-data\") pod \"glance-db-sync-vdwfv\" (UID: \"34f1ac52-5781-4029-9ba6-5bc9f7d7a193\") " pod="glance-kuttl-tests/glance-db-sync-vdwfv" Jan 28 12:49:26 crc kubenswrapper[4685]: I0128 12:49:26.597193 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/34f1ac52-5781-4029-9ba6-5bc9f7d7a193-db-sync-config-data\") pod \"glance-db-sync-vdwfv\" (UID: \"34f1ac52-5781-4029-9ba6-5bc9f7d7a193\") " pod="glance-kuttl-tests/glance-db-sync-vdwfv" Jan 28 12:49:26 crc kubenswrapper[4685]: I0128 12:49:26.698243 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34f1ac52-5781-4029-9ba6-5bc9f7d7a193-config-data\") pod \"glance-db-sync-vdwfv\" (UID: \"34f1ac52-5781-4029-9ba6-5bc9f7d7a193\") " pod="glance-kuttl-tests/glance-db-sync-vdwfv" Jan 28 12:49:26 crc kubenswrapper[4685]: I0128 12:49:26.698314 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/34f1ac52-5781-4029-9ba6-5bc9f7d7a193-db-sync-config-data\") pod \"glance-db-sync-vdwfv\" (UID: \"34f1ac52-5781-4029-9ba6-5bc9f7d7a193\") " pod="glance-kuttl-tests/glance-db-sync-vdwfv" Jan 28 12:49:26 crc kubenswrapper[4685]: I0128 12:49:26.698440 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6dwp\" (UniqueName: \"kubernetes.io/projected/34f1ac52-5781-4029-9ba6-5bc9f7d7a193-kube-api-access-s6dwp\") pod \"glance-db-sync-vdwfv\" (UID: \"34f1ac52-5781-4029-9ba6-5bc9f7d7a193\") " pod="glance-kuttl-tests/glance-db-sync-vdwfv" Jan 28 12:49:26 crc kubenswrapper[4685]: I0128 12:49:26.704863 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/34f1ac52-5781-4029-9ba6-5bc9f7d7a193-db-sync-config-data\") pod \"glance-db-sync-vdwfv\" (UID: \"34f1ac52-5781-4029-9ba6-5bc9f7d7a193\") " pod="glance-kuttl-tests/glance-db-sync-vdwfv" Jan 28 12:49:26 crc kubenswrapper[4685]: I0128 12:49:26.724974 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34f1ac52-5781-4029-9ba6-5bc9f7d7a193-config-data\") pod \"glance-db-sync-vdwfv\" (UID: \"34f1ac52-5781-4029-9ba6-5bc9f7d7a193\") " pod="glance-kuttl-tests/glance-db-sync-vdwfv" Jan 28 12:49:26 crc kubenswrapper[4685]: I0128 12:49:26.727843 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6dwp\" (UniqueName: \"kubernetes.io/projected/34f1ac52-5781-4029-9ba6-5bc9f7d7a193-kube-api-access-s6dwp\") pod \"glance-db-sync-vdwfv\" (UID: \"34f1ac52-5781-4029-9ba6-5bc9f7d7a193\") " pod="glance-kuttl-tests/glance-db-sync-vdwfv" Jan 28 12:49:26 crc kubenswrapper[4685]: I0128 12:49:26.856780 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-vdwfv" Jan 28 12:49:27 crc kubenswrapper[4685]: I0128 12:49:27.269696 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-sync-vdwfv"] Jan 28 12:49:27 crc kubenswrapper[4685]: I0128 12:49:27.617934 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-vdwfv" event={"ID":"34f1ac52-5781-4029-9ba6-5bc9f7d7a193","Type":"ContainerStarted","Data":"1072dabb87501dfd26de76c6f63c387f1ab1a35b6ee44d432fa4e1c19d2a7e97"} Jan 28 12:49:28 crc kubenswrapper[4685]: I0128 12:49:28.632395 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-vdwfv" event={"ID":"34f1ac52-5781-4029-9ba6-5bc9f7d7a193","Type":"ContainerStarted","Data":"fcac3c3a3e0d9f13e189e87b4daf192db1405d55bdcf80c26e3b263aac8b8503"} Jan 28 12:49:28 crc kubenswrapper[4685]: I0128 12:49:28.652587 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-db-sync-vdwfv" podStartSLOduration=2.652554005 podStartE2EDuration="2.652554005s" podCreationTimestamp="2026-01-28 12:49:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:28.647292057 +0000 UTC m=+1719.734705902" watchObservedRunningTime="2026-01-28 12:49:28.652554005 +0000 UTC m=+1719.739967840" Jan 28 12:49:31 crc kubenswrapper[4685]: I0128 12:49:31.655785 4685 generic.go:334] "Generic (PLEG): container finished" podID="34f1ac52-5781-4029-9ba6-5bc9f7d7a193" containerID="fcac3c3a3e0d9f13e189e87b4daf192db1405d55bdcf80c26e3b263aac8b8503" exitCode=0 Jan 28 12:49:31 crc kubenswrapper[4685]: I0128 12:49:31.655878 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-vdwfv" event={"ID":"34f1ac52-5781-4029-9ba6-5bc9f7d7a193","Type":"ContainerDied","Data":"fcac3c3a3e0d9f13e189e87b4daf192db1405d55bdcf80c26e3b263aac8b8503"} Jan 28 12:49:32 crc kubenswrapper[4685]: I0128 12:49:32.992373 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-vdwfv" Jan 28 12:49:33 crc kubenswrapper[4685]: I0128 12:49:33.098066 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s6dwp\" (UniqueName: \"kubernetes.io/projected/34f1ac52-5781-4029-9ba6-5bc9f7d7a193-kube-api-access-s6dwp\") pod \"34f1ac52-5781-4029-9ba6-5bc9f7d7a193\" (UID: \"34f1ac52-5781-4029-9ba6-5bc9f7d7a193\") " Jan 28 12:49:33 crc kubenswrapper[4685]: I0128 12:49:33.098445 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34f1ac52-5781-4029-9ba6-5bc9f7d7a193-config-data\") pod \"34f1ac52-5781-4029-9ba6-5bc9f7d7a193\" (UID: \"34f1ac52-5781-4029-9ba6-5bc9f7d7a193\") " Jan 28 12:49:33 crc kubenswrapper[4685]: I0128 12:49:33.098580 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/34f1ac52-5781-4029-9ba6-5bc9f7d7a193-db-sync-config-data\") pod \"34f1ac52-5781-4029-9ba6-5bc9f7d7a193\" (UID: \"34f1ac52-5781-4029-9ba6-5bc9f7d7a193\") " Jan 28 12:49:33 crc kubenswrapper[4685]: I0128 12:49:33.104910 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34f1ac52-5781-4029-9ba6-5bc9f7d7a193-kube-api-access-s6dwp" (OuterVolumeSpecName: "kube-api-access-s6dwp") pod "34f1ac52-5781-4029-9ba6-5bc9f7d7a193" (UID: "34f1ac52-5781-4029-9ba6-5bc9f7d7a193"). InnerVolumeSpecName "kube-api-access-s6dwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:49:33 crc kubenswrapper[4685]: I0128 12:49:33.105556 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34f1ac52-5781-4029-9ba6-5bc9f7d7a193-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "34f1ac52-5781-4029-9ba6-5bc9f7d7a193" (UID: "34f1ac52-5781-4029-9ba6-5bc9f7d7a193"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:49:33 crc kubenswrapper[4685]: I0128 12:49:33.136719 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34f1ac52-5781-4029-9ba6-5bc9f7d7a193-config-data" (OuterVolumeSpecName: "config-data") pod "34f1ac52-5781-4029-9ba6-5bc9f7d7a193" (UID: "34f1ac52-5781-4029-9ba6-5bc9f7d7a193"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:49:33 crc kubenswrapper[4685]: I0128 12:49:33.200190 4685 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/34f1ac52-5781-4029-9ba6-5bc9f7d7a193-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:33 crc kubenswrapper[4685]: I0128 12:49:33.200240 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s6dwp\" (UniqueName: \"kubernetes.io/projected/34f1ac52-5781-4029-9ba6-5bc9f7d7a193-kube-api-access-s6dwp\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:33 crc kubenswrapper[4685]: I0128 12:49:33.200255 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34f1ac52-5781-4029-9ba6-5bc9f7d7a193-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:33 crc kubenswrapper[4685]: I0128 12:49:33.672480 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-vdwfv" event={"ID":"34f1ac52-5781-4029-9ba6-5bc9f7d7a193","Type":"ContainerDied","Data":"1072dabb87501dfd26de76c6f63c387f1ab1a35b6ee44d432fa4e1c19d2a7e97"} Jan 28 12:49:33 crc kubenswrapper[4685]: I0128 12:49:33.672542 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1072dabb87501dfd26de76c6f63c387f1ab1a35b6ee44d432fa4e1c19d2a7e97" Jan 28 12:49:33 crc kubenswrapper[4685]: I0128 12:49:33.672551 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-vdwfv" Jan 28 12:49:34 crc kubenswrapper[4685]: I0128 12:49:34.997894 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:49:34 crc kubenswrapper[4685]: E0128 12:49:34.998377 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34f1ac52-5781-4029-9ba6-5bc9f7d7a193" containerName="glance-db-sync" Jan 28 12:49:34 crc kubenswrapper[4685]: I0128 12:49:34.998389 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="34f1ac52-5781-4029-9ba6-5bc9f7d7a193" containerName="glance-db-sync" Jan 28 12:49:34 crc kubenswrapper[4685]: I0128 12:49:34.998536 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="34f1ac52-5781-4029-9ba6-5bc9f7d7a193" containerName="glance-db-sync" Jan 28 12:49:34 crc kubenswrapper[4685]: I0128 12:49:34.999410 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.001159 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-glance-dockercfg-gx244" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.001448 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-scripts" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.001707 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-default-external-config-data" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.024146 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.031967 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.033246 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.036061 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-default-internal-config-data" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.056579 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.130158 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-run\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.130271 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-scripts\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.130290 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-sys\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.130313 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-run\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.130333 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d480af3-7335-4cde-8f1a-5e8c89339d61-scripts\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.130533 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.130617 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.130728 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-etc-iscsi\") pod 
\"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.130808 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-config-data\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.130871 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-etc-nvme\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.130981 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6d480af3-7335-4cde-8f1a-5e8c89339d61-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.131079 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.131138 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-lib-modules\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.131222 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7q4n\" (UniqueName: \"kubernetes.io/projected/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-kube-api-access-s7q4n\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.131352 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-dev\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.131417 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.131478 4685 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smmzz\" (UniqueName: \"kubernetes.io/projected/6d480af3-7335-4cde-8f1a-5e8c89339d61-kube-api-access-smmzz\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.131542 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.131603 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.131671 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.131728 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d480af3-7335-4cde-8f1a-5e8c89339d61-logs\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.131825 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-logs\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.131903 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.131965 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-dev\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.132043 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc 
kubenswrapper[4685]: I0128 12:49:35.132113 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-var-locks-brick\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.132197 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-sys\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.132265 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d480af3-7335-4cde-8f1a-5e8c89339d61-config-data\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.234113 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6d480af3-7335-4cde-8f1a-5e8c89339d61-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.234469 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.234572 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-lib-modules\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.234682 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7q4n\" (UniqueName: \"kubernetes.io/projected/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-kube-api-access-s7q4n\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.234800 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-dev\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.234876 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6d480af3-7335-4cde-8f1a-5e8c89339d61-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " 
pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.234893 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.234990 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smmzz\" (UniqueName: \"kubernetes.io/projected/6d480af3-7335-4cde-8f1a-5e8c89339d61-kube-api-access-smmzz\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.235042 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.235076 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.235116 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.235139 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d480af3-7335-4cde-8f1a-5e8c89339d61-logs\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.235204 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-logs\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.235433 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.235512 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc 
kubenswrapper[4685]: I0128 12:49:35.235544 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-dev\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.235631 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.235634 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.235843 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-var-locks-brick\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.236218 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-sys\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.236366 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-lib-modules\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.236319 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d480af3-7335-4cde-8f1a-5e8c89339d61-config-data\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.236780 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-run\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.236818 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-scripts\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.236884 4685 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-sys\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.236901 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d480af3-7335-4cde-8f1a-5e8c89339d61-logs\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.236929 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-run\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.237005 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d480af3-7335-4cde-8f1a-5e8c89339d61-scripts\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.237047 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.237124 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.237127 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-sys\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.237164 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-etc-iscsi\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.237204 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-config-data\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.237275 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: 
\"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-etc-nvme\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.237346 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.237384 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-dev\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.236259 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") device mount path \"/mnt/openstack/pv10\"" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.237419 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-var-locks-brick\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.236727 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-dev\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.236621 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-logs\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.237922 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") device mount path \"/mnt/openstack/pv14\"" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.238097 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.238152 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-run\") pod 
\"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.238200 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-run\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.238245 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.238240 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") device mount path \"/mnt/openstack/pv07\"" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.238308 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-etc-iscsi\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.238376 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-sys\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.238447 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") device mount path \"/mnt/openstack/pv11\"" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.238638 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-etc-nvme\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.249917 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d480af3-7335-4cde-8f1a-5e8c89339d61-config-data\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.251594 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d480af3-7335-4cde-8f1a-5e8c89339d61-scripts\") pod \"glance-default-external-api-0\" (UID: 
\"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.256670 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7q4n\" (UniqueName: \"kubernetes.io/projected/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-kube-api-access-s7q4n\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.258085 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-config-data\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.274681 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.277656 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-scripts\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.281671 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.283094 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"glance-default-internal-api-0\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.286974 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smmzz\" (UniqueName: \"kubernetes.io/projected/6d480af3-7335-4cde-8f1a-5e8c89339d61-kube-api-access-smmzz\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.309484 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.315475 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.360745 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.782931 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:49:35 crc kubenswrapper[4685]: I0128 12:49:35.869345 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:49:35 crc kubenswrapper[4685]: W0128 12:49:35.872353 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaed4ab3f_dd6f_464c_bf57_29f48f3c9631.slice/crio-a05c5c9dc4a9fba44be2ddb22a12844b13add7cd90aa3477152c34820246f7c1 WatchSource:0}: Error finding container a05c5c9dc4a9fba44be2ddb22a12844b13add7cd90aa3477152c34820246f7c1: Status 404 returned error can't find the container with id a05c5c9dc4a9fba44be2ddb22a12844b13add7cd90aa3477152c34820246f7c1 Jan 28 12:49:36 crc kubenswrapper[4685]: I0128 12:49:36.176589 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:49:36 crc kubenswrapper[4685]: I0128 12:49:36.696911 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"6d480af3-7335-4cde-8f1a-5e8c89339d61","Type":"ContainerStarted","Data":"68d535a3f683da43afeed5c547fb0ba352bc33e76efa8810042dfd483dadca40"} Jan 28 12:49:36 crc kubenswrapper[4685]: I0128 12:49:36.697597 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"6d480af3-7335-4cde-8f1a-5e8c89339d61","Type":"ContainerStarted","Data":"fc9aa87ea3589a04c2928fbc5b9a37bfe28ffbba7e027fab51e773b2d656fad4"} Jan 28 12:49:36 crc kubenswrapper[4685]: I0128 12:49:36.697613 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"6d480af3-7335-4cde-8f1a-5e8c89339d61","Type":"ContainerStarted","Data":"fb28a047a1d87b1eff3b1242ca9b9799c6dcdc986cfe71b1fea99e0ffd7eb31d"} Jan 28 12:49:36 crc kubenswrapper[4685]: I0128 12:49:36.699256 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"aed4ab3f-dd6f-464c-bf57-29f48f3c9631","Type":"ContainerStarted","Data":"98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1"} Jan 28 12:49:36 crc kubenswrapper[4685]: I0128 12:49:36.699290 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"aed4ab3f-dd6f-464c-bf57-29f48f3c9631","Type":"ContainerStarted","Data":"12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce"} Jan 28 12:49:36 crc kubenswrapper[4685]: I0128 12:49:36.699302 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"aed4ab3f-dd6f-464c-bf57-29f48f3c9631","Type":"ContainerStarted","Data":"a05c5c9dc4a9fba44be2ddb22a12844b13add7cd90aa3477152c34820246f7c1"} Jan 28 12:49:37 crc kubenswrapper[4685]: I0128 12:49:37.707253 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"6d480af3-7335-4cde-8f1a-5e8c89339d61","Type":"ContainerStarted","Data":"1e1053d37523136fbb90c95478efe81e8c162e7130f6707635232a9e6179b2fa"} Jan 28 12:49:37 crc kubenswrapper[4685]: I0128 12:49:37.711481 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"aed4ab3f-dd6f-464c-bf57-29f48f3c9631","Type":"ContainerStarted","Data":"17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb"} Jan 28 12:49:37 crc kubenswrapper[4685]: I0128 12:49:37.711613 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-0" podUID="aed4ab3f-dd6f-464c-bf57-29f48f3c9631" containerName="glance-log" containerID="cri-o://12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce" gracePeriod=30 Jan 28 12:49:37 crc kubenswrapper[4685]: I0128 12:49:37.711790 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-0" podUID="aed4ab3f-dd6f-464c-bf57-29f48f3c9631" containerName="glance-api" containerID="cri-o://17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb" gracePeriod=30 Jan 28 12:49:37 crc kubenswrapper[4685]: I0128 12:49:37.711867 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-0" podUID="aed4ab3f-dd6f-464c-bf57-29f48f3c9631" containerName="glance-httpd" containerID="cri-o://98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1" gracePeriod=30 Jan 28 12:49:37 crc kubenswrapper[4685]: I0128 12:49:37.740602 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-external-api-0" podStartSLOduration=3.740579774 podStartE2EDuration="3.740579774s" podCreationTimestamp="2026-01-28 12:49:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:37.736689174 +0000 UTC m=+1728.824103039" watchObservedRunningTime="2026-01-28 12:49:37.740579774 +0000 UTC m=+1728.827993629" Jan 28 12:49:37 crc kubenswrapper[4685]: I0128 12:49:37.769470 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-internal-api-0" podStartSLOduration=3.76944625 podStartE2EDuration="3.76944625s" podCreationTimestamp="2026-01-28 12:49:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:37.767803403 +0000 UTC m=+1728.855217268" watchObservedRunningTime="2026-01-28 12:49:37.76944625 +0000 UTC m=+1728.856860105" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.281514 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.386682 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-etc-iscsi\") pod \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.386746 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-var-locks-brick\") pod \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.386784 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-scripts\") pod \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.386808 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.386864 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-config-data\") pod \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.386885 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-httpd-run\") pod \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.386902 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.386889 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "aed4ab3f-dd6f-464c-bf57-29f48f3c9631" (UID: "aed4ab3f-dd6f-464c-bf57-29f48f3c9631"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.387315 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "aed4ab3f-dd6f-464c-bf57-29f48f3c9631" (UID: "aed4ab3f-dd6f-464c-bf57-29f48f3c9631"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.386883 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "aed4ab3f-dd6f-464c-bf57-29f48f3c9631" (UID: "aed4ab3f-dd6f-464c-bf57-29f48f3c9631"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.387495 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-dev\") pod \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.387534 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-run\") pod \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.387548 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-lib-modules\") pod \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.387569 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-logs\") pod \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.387585 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-etc-nvme\") pod \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.387605 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-sys\") pod \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.387627 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s7q4n\" (UniqueName: \"kubernetes.io/projected/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-kube-api-access-s7q4n\") pod \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\" (UID: \"aed4ab3f-dd6f-464c-bf57-29f48f3c9631\") " Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.387907 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.387924 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.388256 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: 
\"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.388721 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "aed4ab3f-dd6f-464c-bf57-29f48f3c9631" (UID: "aed4ab3f-dd6f-464c-bf57-29f48f3c9631"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.388781 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-dev" (OuterVolumeSpecName: "dev") pod "aed4ab3f-dd6f-464c-bf57-29f48f3c9631" (UID: "aed4ab3f-dd6f-464c-bf57-29f48f3c9631"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.388809 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-run" (OuterVolumeSpecName: "run") pod "aed4ab3f-dd6f-464c-bf57-29f48f3c9631" (UID: "aed4ab3f-dd6f-464c-bf57-29f48f3c9631"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.388839 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "aed4ab3f-dd6f-464c-bf57-29f48f3c9631" (UID: "aed4ab3f-dd6f-464c-bf57-29f48f3c9631"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.389137 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-logs" (OuterVolumeSpecName: "logs") pod "aed4ab3f-dd6f-464c-bf57-29f48f3c9631" (UID: "aed4ab3f-dd6f-464c-bf57-29f48f3c9631"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.389200 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-sys" (OuterVolumeSpecName: "sys") pod "aed4ab3f-dd6f-464c-bf57-29f48f3c9631" (UID: "aed4ab3f-dd6f-464c-bf57-29f48f3c9631"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.391117 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance-cache") pod "aed4ab3f-dd6f-464c-bf57-29f48f3c9631" (UID: "aed4ab3f-dd6f-464c-bf57-29f48f3c9631"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.391484 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-kube-api-access-s7q4n" (OuterVolumeSpecName: "kube-api-access-s7q4n") pod "aed4ab3f-dd6f-464c-bf57-29f48f3c9631" (UID: "aed4ab3f-dd6f-464c-bf57-29f48f3c9631"). InnerVolumeSpecName "kube-api-access-s7q4n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.391966 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-scripts" (OuterVolumeSpecName: "scripts") pod "aed4ab3f-dd6f-464c-bf57-29f48f3c9631" (UID: "aed4ab3f-dd6f-464c-bf57-29f48f3c9631"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.410907 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage14-crc" (OuterVolumeSpecName: "glance") pod "aed4ab3f-dd6f-464c-bf57-29f48f3c9631" (UID: "aed4ab3f-dd6f-464c-bf57-29f48f3c9631"). InnerVolumeSpecName "local-storage14-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.460278 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-config-data" (OuterVolumeSpecName: "config-data") pod "aed4ab3f-dd6f-464c-bf57-29f48f3c9631" (UID: "aed4ab3f-dd6f-464c-bf57-29f48f3c9631"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.490211 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s7q4n\" (UniqueName: \"kubernetes.io/projected/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-kube-api-access-s7q4n\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.490262 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.490316 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.490337 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.490362 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") on node \"crc\" " Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.490379 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.490396 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.490412 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.490428 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.490444 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.490462 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/aed4ab3f-dd6f-464c-bf57-29f48f3c9631-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.504011 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage14-crc" (UniqueName: "kubernetes.io/local-volume/local-storage14-crc") on node "crc" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.504340 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.591426 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.591454 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.720869 4685 generic.go:334] "Generic (PLEG): container finished" podID="aed4ab3f-dd6f-464c-bf57-29f48f3c9631" containerID="17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb" exitCode=143 Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.721003 4685 generic.go:334] "Generic (PLEG): container finished" podID="aed4ab3f-dd6f-464c-bf57-29f48f3c9631" containerID="98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1" exitCode=143 Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.721015 4685 generic.go:334] "Generic (PLEG): container finished" podID="aed4ab3f-dd6f-464c-bf57-29f48f3c9631" containerID="12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce" exitCode=143 Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.721085 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.721186 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"aed4ab3f-dd6f-464c-bf57-29f48f3c9631","Type":"ContainerDied","Data":"17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb"} Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.721213 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"aed4ab3f-dd6f-464c-bf57-29f48f3c9631","Type":"ContainerDied","Data":"98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1"} Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.721319 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"aed4ab3f-dd6f-464c-bf57-29f48f3c9631","Type":"ContainerDied","Data":"12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce"} Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.721332 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"aed4ab3f-dd6f-464c-bf57-29f48f3c9631","Type":"ContainerDied","Data":"a05c5c9dc4a9fba44be2ddb22a12844b13add7cd90aa3477152c34820246f7c1"} Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.721347 4685 scope.go:117] "RemoveContainer" containerID="17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.748851 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.754432 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.777722 4685 scope.go:117] "RemoveContainer" containerID="98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.779022 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:49:38 crc kubenswrapper[4685]: E0128 12:49:38.779364 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aed4ab3f-dd6f-464c-bf57-29f48f3c9631" containerName="glance-httpd" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.779386 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="aed4ab3f-dd6f-464c-bf57-29f48f3c9631" containerName="glance-httpd" Jan 28 12:49:38 crc kubenswrapper[4685]: E0128 12:49:38.779416 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aed4ab3f-dd6f-464c-bf57-29f48f3c9631" containerName="glance-log" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.779424 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="aed4ab3f-dd6f-464c-bf57-29f48f3c9631" containerName="glance-log" Jan 28 12:49:38 crc kubenswrapper[4685]: E0128 12:49:38.779442 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aed4ab3f-dd6f-464c-bf57-29f48f3c9631" containerName="glance-api" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.779450 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="aed4ab3f-dd6f-464c-bf57-29f48f3c9631" containerName="glance-api" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.779610 4685 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="aed4ab3f-dd6f-464c-bf57-29f48f3c9631" containerName="glance-api" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.779630 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="aed4ab3f-dd6f-464c-bf57-29f48f3c9631" containerName="glance-httpd" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.779649 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="aed4ab3f-dd6f-464c-bf57-29f48f3c9631" containerName="glance-log" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.780865 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.782255 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-default-internal-config-data" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.790464 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.843140 4685 scope.go:117] "RemoveContainer" containerID="12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.865411 4685 scope.go:117] "RemoveContainer" containerID="17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb" Jan 28 12:49:38 crc kubenswrapper[4685]: E0128 12:49:38.865943 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb\": container with ID starting with 17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb not found: ID does not exist" containerID="17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.866008 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb"} err="failed to get container status \"17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb\": rpc error: code = NotFound desc = could not find container \"17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb\": container with ID starting with 17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb not found: ID does not exist" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.866036 4685 scope.go:117] "RemoveContainer" containerID="98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1" Jan 28 12:49:38 crc kubenswrapper[4685]: E0128 12:49:38.866451 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1\": container with ID starting with 98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1 not found: ID does not exist" containerID="98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.866508 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1"} err="failed to get container status \"98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1\": rpc error: code = NotFound desc = could not find container \"98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1\": 
container with ID starting with 98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1 not found: ID does not exist" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.866542 4685 scope.go:117] "RemoveContainer" containerID="12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce" Jan 28 12:49:38 crc kubenswrapper[4685]: E0128 12:49:38.871504 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce\": container with ID starting with 12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce not found: ID does not exist" containerID="12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.871580 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce"} err="failed to get container status \"12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce\": rpc error: code = NotFound desc = could not find container \"12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce\": container with ID starting with 12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce not found: ID does not exist" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.871632 4685 scope.go:117] "RemoveContainer" containerID="17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.875272 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb"} err="failed to get container status \"17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb\": rpc error: code = NotFound desc = could not find container \"17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb\": container with ID starting with 17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb not found: ID does not exist" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.875310 4685 scope.go:117] "RemoveContainer" containerID="98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.875942 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1"} err="failed to get container status \"98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1\": rpc error: code = NotFound desc = could not find container \"98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1\": container with ID starting with 98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1 not found: ID does not exist" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.875984 4685 scope.go:117] "RemoveContainer" containerID="12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.876422 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce"} err="failed to get container status \"12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce\": rpc error: code = NotFound desc = could not find container \"12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce\": 
container with ID starting with 12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce not found: ID does not exist" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.876448 4685 scope.go:117] "RemoveContainer" containerID="17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.876779 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb"} err="failed to get container status \"17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb\": rpc error: code = NotFound desc = could not find container \"17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb\": container with ID starting with 17c19c7498fd7aa39dd61f071b7f8b5f572c6f18cb06a60e1842f8968ef720cb not found: ID does not exist" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.876797 4685 scope.go:117] "RemoveContainer" containerID="98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.877217 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1"} err="failed to get container status \"98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1\": rpc error: code = NotFound desc = could not find container \"98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1\": container with ID starting with 98b856957430f7c4e2b7735030ff8b53ef44716a03a0cd976f6f6be7c74ebbd1 not found: ID does not exist" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.877282 4685 scope.go:117] "RemoveContainer" containerID="12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.877650 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce"} err="failed to get container status \"12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce\": rpc error: code = NotFound desc = could not find container \"12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce\": container with ID starting with 12a37436d8265b299f90515229ae5333702c33c97eb70781e09a47e5b4b64dce not found: ID does not exist" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.896023 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.896070 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-run\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.896575 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-sys\") pod \"glance-default-internal-api-0\" 
(UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.896751 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/326f0b30-3e84-4b80-99c4-7fa87312efb2-scripts\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.896809 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.896906 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.896938 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.896964 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.896981 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/326f0b30-3e84-4b80-99c4-7fa87312efb2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.897011 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/326f0b30-3e84-4b80-99c4-7fa87312efb2-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.897607 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/326f0b30-3e84-4b80-99c4-7fa87312efb2-logs\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.897681 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: 
\"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-dev\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.897714 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.897735 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txctj\" (UniqueName: \"kubernetes.io/projected/326f0b30-3e84-4b80-99c4-7fa87312efb2-kube-api-access-txctj\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.999097 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/326f0b30-3e84-4b80-99c4-7fa87312efb2-logs\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.999414 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-dev\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.999497 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-dev\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.999520 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.999656 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txctj\" (UniqueName: \"kubernetes.io/projected/326f0b30-3e84-4b80-99c4-7fa87312efb2-kube-api-access-txctj\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.999744 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.999815 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: 
\"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-run\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.999881 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-sys\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:38 crc kubenswrapper[4685]: I0128 12:49:38.999946 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/326f0b30-3e84-4b80-99c4-7fa87312efb2-scripts\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.000003 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-sys\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.000013 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.000141 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.000197 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.000223 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.000245 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/326f0b30-3e84-4b80-99c4-7fa87312efb2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.000286 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/326f0b30-3e84-4b80-99c4-7fa87312efb2-httpd-run\") pod 
\"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.000387 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:38.999967 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:38.999747 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.000655 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.000753 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-run\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.000877 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/326f0b30-3e84-4b80-99c4-7fa87312efb2-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.001004 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") device mount path \"/mnt/openstack/pv11\"" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.001016 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") device mount path \"/mnt/openstack/pv14\"" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.005235 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/326f0b30-3e84-4b80-99c4-7fa87312efb2-logs\") pod \"glance-default-internal-api-0\" (UID: 
\"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.007374 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/326f0b30-3e84-4b80-99c4-7fa87312efb2-scripts\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.018419 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/326f0b30-3e84-4b80-99c4-7fa87312efb2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.018916 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txctj\" (UniqueName: \"kubernetes.io/projected/326f0b30-3e84-4b80-99c4-7fa87312efb2-kube-api-access-txctj\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.030756 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.047280 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"glance-default-internal-api-0\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.118685 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.546406 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:49:39 crc kubenswrapper[4685]: E0128 12:49:39.546955 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.550021 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:49:39 crc kubenswrapper[4685]: W0128 12:49:39.559628 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod326f0b30_3e84_4b80_99c4_7fa87312efb2.slice/crio-c6637be01e48cdb303a46fd315213369b704410c24fecb9e5436add52619de42 WatchSource:0}: Error finding container c6637be01e48cdb303a46fd315213369b704410c24fecb9e5436add52619de42: Status 404 returned error can't find the container with id c6637be01e48cdb303a46fd315213369b704410c24fecb9e5436add52619de42 Jan 28 12:49:39 crc kubenswrapper[4685]: I0128 12:49:39.730058 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"326f0b30-3e84-4b80-99c4-7fa87312efb2","Type":"ContainerStarted","Data":"c6637be01e48cdb303a46fd315213369b704410c24fecb9e5436add52619de42"} Jan 28 12:49:40 crc kubenswrapper[4685]: I0128 12:49:40.553883 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aed4ab3f-dd6f-464c-bf57-29f48f3c9631" path="/var/lib/kubelet/pods/aed4ab3f-dd6f-464c-bf57-29f48f3c9631/volumes" Jan 28 12:49:40 crc kubenswrapper[4685]: I0128 12:49:40.740366 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"326f0b30-3e84-4b80-99c4-7fa87312efb2","Type":"ContainerStarted","Data":"6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953"} Jan 28 12:49:40 crc kubenswrapper[4685]: I0128 12:49:40.740420 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"326f0b30-3e84-4b80-99c4-7fa87312efb2","Type":"ContainerStarted","Data":"19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd"} Jan 28 12:49:40 crc kubenswrapper[4685]: I0128 12:49:40.740434 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"326f0b30-3e84-4b80-99c4-7fa87312efb2","Type":"ContainerStarted","Data":"78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271"} Jan 28 12:49:40 crc kubenswrapper[4685]: I0128 12:49:40.775095 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-internal-api-0" podStartSLOduration=2.775071648 podStartE2EDuration="2.775071648s" podCreationTimestamp="2026-01-28 12:49:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:40.76134432 +0000 UTC m=+1731.848758155" watchObservedRunningTime="2026-01-28 
12:49:40.775071648 +0000 UTC m=+1731.862485483" Jan 28 12:49:45 crc kubenswrapper[4685]: I0128 12:49:45.316763 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:45 crc kubenswrapper[4685]: I0128 12:49:45.317314 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:45 crc kubenswrapper[4685]: I0128 12:49:45.317325 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:45 crc kubenswrapper[4685]: I0128 12:49:45.337411 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:45 crc kubenswrapper[4685]: I0128 12:49:45.337679 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:45 crc kubenswrapper[4685]: I0128 12:49:45.353451 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:45 crc kubenswrapper[4685]: I0128 12:49:45.780925 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:45 crc kubenswrapper[4685]: I0128 12:49:45.781292 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:45 crc kubenswrapper[4685]: I0128 12:49:45.781304 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:45 crc kubenswrapper[4685]: I0128 12:49:45.792713 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:45 crc kubenswrapper[4685]: I0128 12:49:45.794552 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:45 crc kubenswrapper[4685]: I0128 12:49:45.798125 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:49:49 crc kubenswrapper[4685]: I0128 12:49:49.119856 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:49 crc kubenswrapper[4685]: I0128 12:49:49.120312 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:49 crc kubenswrapper[4685]: I0128 12:49:49.120328 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:49 crc kubenswrapper[4685]: I0128 12:49:49.144405 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:49 crc kubenswrapper[4685]: I0128 12:49:49.144897 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:49 crc kubenswrapper[4685]: I0128 12:49:49.159939 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:49 crc kubenswrapper[4685]: I0128 
12:49:49.814861 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:49 crc kubenswrapper[4685]: I0128 12:49:49.814942 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:49 crc kubenswrapper[4685]: I0128 12:49:49.814962 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:49 crc kubenswrapper[4685]: I0128 12:49:49.826110 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:49 crc kubenswrapper[4685]: I0128 12:49:49.827905 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:49 crc kubenswrapper[4685]: I0128 12:49:49.841553 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:49:51 crc kubenswrapper[4685]: I0128 12:49:51.546031 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:49:51 crc kubenswrapper[4685]: E0128 12:49:51.546548 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.083618 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-external-api-2"] Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.085791 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.090150 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-external-api-1"] Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.091898 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.109648 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-2"] Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.126310 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-1"] Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216078 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-dev\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216122 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216144 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-scripts\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216208 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-logs\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216229 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-run\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216245 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage18-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage18-crc\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216260 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-scripts\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216277 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " 
pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216354 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-sys\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216398 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-etc-iscsi\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216430 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-sys\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216504 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-httpd-run\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216564 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-dev\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216626 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-config-data\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216655 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216686 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-httpd-run\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216714 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndlmq\" (UniqueName: 
\"kubernetes.io/projected/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-kube-api-access-ndlmq\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216745 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-run\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216772 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-config-data\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216789 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-var-locks-brick\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216846 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-var-locks-brick\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216866 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-lib-modules\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216911 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-etc-nvme\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216930 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-etc-nvme\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.216964 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7548\" (UniqueName: \"kubernetes.io/projected/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-kube-api-access-v7548\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 
12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.217019 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-lib-modules\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.217040 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-logs\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.217063 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-etc-iscsi\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.224672 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-2"] Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.226261 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.232218 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.233752 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.245679 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-2"] Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.256144 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.319738 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-config-data\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.319798 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-etc-iscsi\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.319828 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.319853 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-httpd-run\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.319881 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndlmq\" (UniqueName: \"kubernetes.io/projected/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-kube-api-access-ndlmq\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.319910 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-dev\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.319934 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-run\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.319954 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-config-data\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " 
pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.319987 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage13-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320011 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-var-locks-brick\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320033 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-etc-nvme\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320055 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-sys\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320082 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/05e1a5a6-7b96-47cb-9221-df4f61a49880-logs\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320104 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-logs\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320126 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-var-locks-brick\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320146 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-lib-modules\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320190 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-2\" (UID: 
\"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320211 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-etc-nvme\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320229 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-etc-nvme\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320249 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-sys\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320271 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05e1a5a6-7b96-47cb-9221-df4f61a49880-scripts\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320295 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7548\" (UniqueName: \"kubernetes.io/projected/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-kube-api-access-v7548\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320317 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/05e1a5a6-7b96-47cb-9221-df4f61a49880-httpd-run\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320343 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320368 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320390 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05e1a5a6-7b96-47cb-9221-df4f61a49880-config-data\") 
pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320409 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-lib-modules\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320430 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-logs\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320457 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-run\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320465 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-var-locks-brick\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320532 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-etc-iscsi\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320605 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-etc-iscsi\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320628 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-run\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320675 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-etc-iscsi\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320702 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-var-locks-brick\") pod 
\"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320726 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-scripts\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320755 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-lib-modules\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320770 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-config-data\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320816 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-var-locks-brick\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320855 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-lib-modules\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320881 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-dev\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320909 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-dev\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320937 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wd5vk\" (UniqueName: \"kubernetes.io/projected/05e1a5a6-7b96-47cb-9221-df4f61a49880-kube-api-access-wd5vk\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.320973 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" 
(UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.321007 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-scripts\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.321072 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-etc-nvme\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.321117 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-run\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.321150 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-logs\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.321457 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-lib-modules\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.321497 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage18-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage18-crc\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.321527 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-var-locks-brick\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.321556 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-scripts\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.321583 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-httpd-run\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.321618 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.321642 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-sys\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.321672 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-etc-iscsi\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.321707 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4fbhq\" (UniqueName: \"kubernetes.io/projected/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-kube-api-access-4fbhq\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.321743 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-sys\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.321908 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-httpd-run\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.321960 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-dev\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.324282 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-etc-nvme\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.324580 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-run\") pod 
\"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.325142 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") device mount path \"/mnt/openstack/pv12\"" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.325210 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-etc-nvme\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.325347 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-logs\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.325390 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage18-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage18-crc\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") device mount path \"/mnt/openstack/pv18\"" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.325436 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-dev\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.325492 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") device mount path \"/mnt/openstack/pv05\"" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.326451 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-httpd-run\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.326501 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-run\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.327273 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-httpd-run\") pod \"glance-default-external-api-2\" (UID: 
\"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.327337 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-dev\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.327454 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-lib-modules\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.327515 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-sys\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.327603 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-etc-iscsi\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.327641 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") device mount path \"/mnt/openstack/pv04\"" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.328054 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-logs\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.328598 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-scripts\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.329141 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-sys\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.331725 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-config-data\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc 
kubenswrapper[4685]: I0128 12:49:53.332541 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-config-data\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.336745 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-scripts\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.352445 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7548\" (UniqueName: \"kubernetes.io/projected/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-kube-api-access-v7548\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.354053 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndlmq\" (UniqueName: \"kubernetes.io/projected/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-kube-api-access-ndlmq\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.360412 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.362216 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage18-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage18-crc\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.364788 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-2\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.364821 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-1\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.405556 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.427792 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.427876 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-etc-iscsi\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.427916 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-dev\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.427940 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage13-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.427958 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-etc-nvme\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.427979 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-sys\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.428011 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/05e1a5a6-7b96-47cb-9221-df4f61a49880-logs\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.427997 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-etc-iscsi\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.428022 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-dev\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.428036 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-logs\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 
12:49:53.428306 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.428029 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-etc-nvme\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.428348 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-sys\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.428392 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05e1a5a6-7b96-47cb-9221-df4f61a49880-scripts\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.428434 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/05e1a5a6-7b96-47cb-9221-df4f61a49880-httpd-run\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.428446 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") device mount path \"/mnt/openstack/pv08\"" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.428468 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.428540 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.428565 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-run\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.428587 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/05e1a5a6-7b96-47cb-9221-df4f61a49880-config-data\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.428663 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") device mount path \"/mnt/openstack/pv02\"" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.428935 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") device mount path \"/mnt/openstack/pv17\"" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.429056 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-run\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.428048 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-sys\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.429248 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-sys\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.431420 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-logs\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.431429 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/05e1a5a6-7b96-47cb-9221-df4f61a49880-logs\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.431529 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-run\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.431574 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: 
\"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-etc-iscsi\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.431602 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-scripts\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.431616 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-run\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.431639 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-config-data\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.431651 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-etc-iscsi\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.431697 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-var-locks-brick\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.431727 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-lib-modules\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.431752 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-dev\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.431781 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wd5vk\" (UniqueName: \"kubernetes.io/projected/05e1a5a6-7b96-47cb-9221-df4f61a49880-kube-api-access-wd5vk\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.431863 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: 
\"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-etc-nvme\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.431904 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-lib-modules\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.431929 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-var-locks-brick\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.431972 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-httpd-run\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.432005 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4fbhq\" (UniqueName: \"kubernetes.io/projected/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-kube-api-access-4fbhq\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.432099 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage13-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") device mount path \"/mnt/openstack/pv13\"" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.432180 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-var-locks-brick\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.432239 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-lib-modules\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.432261 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-dev\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.432302 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: 
\"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-var-locks-brick\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.432333 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-etc-nvme\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.432357 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-lib-modules\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.434225 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/05e1a5a6-7b96-47cb-9221-df4f61a49880-httpd-run\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.435952 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-httpd-run\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.438859 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05e1a5a6-7b96-47cb-9221-df4f61a49880-config-data\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.449060 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05e1a5a6-7b96-47cb-9221-df4f61a49880-scripts\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.449533 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-scripts\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.452122 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wd5vk\" (UniqueName: \"kubernetes.io/projected/05e1a5a6-7b96-47cb-9221-df4f61a49880-kube-api-access-wd5vk\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.452200 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-config-data\") pod 
\"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.452543 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage13-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.454801 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4fbhq\" (UniqueName: \"kubernetes.io/projected/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-kube-api-access-4fbhq\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.471857 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.472592 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-2\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.507819 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-internal-api-1\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.543516 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.554866 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.839440 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-2"] Jan 28 12:49:53 crc kubenswrapper[4685]: W0128 12:49:53.842881 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3b051e6c_2e8b_4ee7_b6e5_544f67c66526.slice/crio-8b4119ad9dc29e161a25a8c2cf9052f85e52d6abc2f6d266015d7b11f7bba870 WatchSource:0}: Error finding container 8b4119ad9dc29e161a25a8c2cf9052f85e52d6abc2f6d266015d7b11f7bba870: Status 404 returned error can't find the container with id 8b4119ad9dc29e161a25a8c2cf9052f85e52d6abc2f6d266015d7b11f7bba870 Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.906420 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-1"] Jan 28 12:49:53 crc kubenswrapper[4685]: W0128 12:49:53.907688 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2ae8567_d50d_4940_8573_37c5cb1d1b8a.slice/crio-a480b2ded74a0181a87c1528260c0ab269c7d43249773ddf17a321e4b50a7506 WatchSource:0}: Error finding container a480b2ded74a0181a87c1528260c0ab269c7d43249773ddf17a321e4b50a7506: Status 404 returned error can't find the container with id a480b2ded74a0181a87c1528260c0ab269c7d43249773ddf17a321e4b50a7506 Jan 28 12:49:53 crc kubenswrapper[4685]: W0128 12:49:53.977903 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod05e1a5a6_7b96_47cb_9221_df4f61a49880.slice/crio-1c418455d3b255ad9ce11a17b314fce13383955f79bae5c9b1ead658a711c9b4 WatchSource:0}: Error finding container 1c418455d3b255ad9ce11a17b314fce13383955f79bae5c9b1ead658a711c9b4: Status 404 returned error can't find the container with id 1c418455d3b255ad9ce11a17b314fce13383955f79bae5c9b1ead658a711c9b4 Jan 28 12:49:53 crc kubenswrapper[4685]: I0128 12:49:53.982388 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.054428 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-2"] Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.850475 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-2" event={"ID":"3b051e6c-2e8b-4ee7-b6e5-544f67c66526","Type":"ContainerStarted","Data":"9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9"} Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.851057 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-2" event={"ID":"3b051e6c-2e8b-4ee7-b6e5-544f67c66526","Type":"ContainerStarted","Data":"84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74"} Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.851071 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-2" event={"ID":"3b051e6c-2e8b-4ee7-b6e5-544f67c66526","Type":"ContainerStarted","Data":"1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e"} Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.851080 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-2" 
event={"ID":"3b051e6c-2e8b-4ee7-b6e5-544f67c66526","Type":"ContainerStarted","Data":"8b4119ad9dc29e161a25a8c2cf9052f85e52d6abc2f6d266015d7b11f7bba870"} Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.853128 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-2" event={"ID":"edce2723-cc1f-4fc1-b42d-15a02e81d4b9","Type":"ContainerStarted","Data":"361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29"} Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.853158 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-2" event={"ID":"edce2723-cc1f-4fc1-b42d-15a02e81d4b9","Type":"ContainerStarted","Data":"9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed"} Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.853187 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-2" event={"ID":"edce2723-cc1f-4fc1-b42d-15a02e81d4b9","Type":"ContainerStarted","Data":"30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8"} Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.853201 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-2" event={"ID":"edce2723-cc1f-4fc1-b42d-15a02e81d4b9","Type":"ContainerStarted","Data":"c54dbd5d78bbd561a607871f340674510b6636e6176238ed5f4d8b4bb7c7882f"} Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.855415 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"05e1a5a6-7b96-47cb-9221-df4f61a49880","Type":"ContainerStarted","Data":"30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3"} Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.855452 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"05e1a5a6-7b96-47cb-9221-df4f61a49880","Type":"ContainerStarted","Data":"d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e"} Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.855466 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"05e1a5a6-7b96-47cb-9221-df4f61a49880","Type":"ContainerStarted","Data":"8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f"} Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.855478 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"05e1a5a6-7b96-47cb-9221-df4f61a49880","Type":"ContainerStarted","Data":"1c418455d3b255ad9ce11a17b314fce13383955f79bae5c9b1ead658a711c9b4"} Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.858591 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-1" event={"ID":"f2ae8567-d50d-4940-8573-37c5cb1d1b8a","Type":"ContainerStarted","Data":"e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3"} Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.858639 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-1" event={"ID":"f2ae8567-d50d-4940-8573-37c5cb1d1b8a","Type":"ContainerStarted","Data":"f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60"} Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.858652 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="glance-kuttl-tests/glance-default-external-api-1" event={"ID":"f2ae8567-d50d-4940-8573-37c5cb1d1b8a","Type":"ContainerStarted","Data":"fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d"} Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.858665 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-1" event={"ID":"f2ae8567-d50d-4940-8573-37c5cb1d1b8a","Type":"ContainerStarted","Data":"a480b2ded74a0181a87c1528260c0ab269c7d43249773ddf17a321e4b50a7506"} Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.888572 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-external-api-2" podStartSLOduration=2.888552946 podStartE2EDuration="2.888552946s" podCreationTimestamp="2026-01-28 12:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:54.874973882 +0000 UTC m=+1745.962387717" watchObservedRunningTime="2026-01-28 12:49:54.888552946 +0000 UTC m=+1745.975966781" Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.920519 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-external-api-1" podStartSLOduration=2.9204790689999998 podStartE2EDuration="2.920479069s" podCreationTimestamp="2026-01-28 12:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:54.905746662 +0000 UTC m=+1745.993160527" watchObservedRunningTime="2026-01-28 12:49:54.920479069 +0000 UTC m=+1746.007892904" Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.935532 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-internal-api-1" podStartSLOduration=2.935509854 podStartE2EDuration="2.935509854s" podCreationTimestamp="2026-01-28 12:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:54.931605753 +0000 UTC m=+1746.019019588" watchObservedRunningTime="2026-01-28 12:49:54.935509854 +0000 UTC m=+1746.022923689" Jan 28 12:49:54 crc kubenswrapper[4685]: I0128 12:49:54.995711 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-internal-api-2" podStartSLOduration=2.995680145 podStartE2EDuration="2.995680145s" podCreationTimestamp="2026-01-28 12:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:49:54.987507354 +0000 UTC m=+1746.074921189" watchObservedRunningTime="2026-01-28 12:49:54.995680145 +0000 UTC m=+1746.083093980" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.406538 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.408058 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.408145 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.428891 4685 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.428939 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.428949 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.431506 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.431566 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.461453 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.466235 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.468261 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.476602 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.540529 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2rtsx"] Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.542556 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2rtsx" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.544466 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.544511 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.546403 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.558524 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2rtsx"] Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.558818 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.558845 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.558856 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.581696 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.581764 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.589675 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.598189 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.603600 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.625219 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.701372 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15ce0167-5fd5-4cf1-8505-8e2090fa5da0-utilities\") pod \"redhat-marketplace-2rtsx\" (UID: \"15ce0167-5fd5-4cf1-8505-8e2090fa5da0\") " pod="openshift-marketplace/redhat-marketplace-2rtsx" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.701475 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15ce0167-5fd5-4cf1-8505-8e2090fa5da0-catalog-content\") pod \"redhat-marketplace-2rtsx\" (UID: \"15ce0167-5fd5-4cf1-8505-8e2090fa5da0\") " pod="openshift-marketplace/redhat-marketplace-2rtsx" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.701507 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-2k8bg\" (UniqueName: \"kubernetes.io/projected/15ce0167-5fd5-4cf1-8505-8e2090fa5da0-kube-api-access-2k8bg\") pod \"redhat-marketplace-2rtsx\" (UID: \"15ce0167-5fd5-4cf1-8505-8e2090fa5da0\") " pod="openshift-marketplace/redhat-marketplace-2rtsx" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.804561 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15ce0167-5fd5-4cf1-8505-8e2090fa5da0-catalog-content\") pod \"redhat-marketplace-2rtsx\" (UID: \"15ce0167-5fd5-4cf1-8505-8e2090fa5da0\") " pod="openshift-marketplace/redhat-marketplace-2rtsx" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.804609 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2k8bg\" (UniqueName: \"kubernetes.io/projected/15ce0167-5fd5-4cf1-8505-8e2090fa5da0-kube-api-access-2k8bg\") pod \"redhat-marketplace-2rtsx\" (UID: \"15ce0167-5fd5-4cf1-8505-8e2090fa5da0\") " pod="openshift-marketplace/redhat-marketplace-2rtsx" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.804724 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15ce0167-5fd5-4cf1-8505-8e2090fa5da0-utilities\") pod \"redhat-marketplace-2rtsx\" (UID: \"15ce0167-5fd5-4cf1-8505-8e2090fa5da0\") " pod="openshift-marketplace/redhat-marketplace-2rtsx" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.805074 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15ce0167-5fd5-4cf1-8505-8e2090fa5da0-catalog-content\") pod \"redhat-marketplace-2rtsx\" (UID: \"15ce0167-5fd5-4cf1-8505-8e2090fa5da0\") " pod="openshift-marketplace/redhat-marketplace-2rtsx" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.805117 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15ce0167-5fd5-4cf1-8505-8e2090fa5da0-utilities\") pod \"redhat-marketplace-2rtsx\" (UID: \"15ce0167-5fd5-4cf1-8505-8e2090fa5da0\") " pod="openshift-marketplace/redhat-marketplace-2rtsx" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.823639 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2k8bg\" (UniqueName: \"kubernetes.io/projected/15ce0167-5fd5-4cf1-8505-8e2090fa5da0-kube-api-access-2k8bg\") pod \"redhat-marketplace-2rtsx\" (UID: \"15ce0167-5fd5-4cf1-8505-8e2090fa5da0\") " pod="openshift-marketplace/redhat-marketplace-2rtsx" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.861755 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2rtsx" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.932404 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.932442 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.932454 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.932466 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.932479 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.933189 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.941162 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.941453 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.941468 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.941496 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.941510 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.941521 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.963957 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.973723 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.977760 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.977873 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.982972 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.983192 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.983333 4685 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.983463 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.989892 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.995716 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.996001 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:03 crc kubenswrapper[4685]: I0128 12:50:03.999599 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:04 crc kubenswrapper[4685]: I0128 12:50:04.339188 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2rtsx"] Jan 28 12:50:04 crc kubenswrapper[4685]: I0128 12:50:04.545944 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:50:04 crc kubenswrapper[4685]: E0128 12:50:04.546219 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:50:04 crc kubenswrapper[4685]: I0128 12:50:04.940905 4685 generic.go:334] "Generic (PLEG): container finished" podID="15ce0167-5fd5-4cf1-8505-8e2090fa5da0" containerID="17b31a5dd324adf72d800ea7bf17ee28d1b5f61b83351577e6c8dd51b2775f51" exitCode=0 Jan 28 12:50:04 crc kubenswrapper[4685]: I0128 12:50:04.940981 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2rtsx" event={"ID":"15ce0167-5fd5-4cf1-8505-8e2090fa5da0","Type":"ContainerDied","Data":"17b31a5dd324adf72d800ea7bf17ee28d1b5f61b83351577e6c8dd51b2775f51"} Jan 28 12:50:04 crc kubenswrapper[4685]: I0128 12:50:04.941031 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2rtsx" event={"ID":"15ce0167-5fd5-4cf1-8505-8e2090fa5da0","Type":"ContainerStarted","Data":"35fa4cce6e2c87ae3f34b28936e5bce3a08e7d8c6d907885ec5f9e61da313bff"} Jan 28 12:50:04 crc kubenswrapper[4685]: I0128 12:50:04.941543 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-2"] Jan 28 12:50:04 crc kubenswrapper[4685]: I0128 12:50:04.956640 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:50:05 crc kubenswrapper[4685]: I0128 12:50:05.117314 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-2"] Jan 28 12:50:05 crc kubenswrapper[4685]: I0128 12:50:05.131218 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-1"] Jan 28 12:50:05 crc 
kubenswrapper[4685]: I0128 12:50:05.950759 4685 generic.go:334] "Generic (PLEG): container finished" podID="15ce0167-5fd5-4cf1-8505-8e2090fa5da0" containerID="7bf274f0e4b77b2137ee5eb836bd3b33cec8458c6771bf5a499160300d476eda" exitCode=0 Jan 28 12:50:05 crc kubenswrapper[4685]: I0128 12:50:05.950809 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2rtsx" event={"ID":"15ce0167-5fd5-4cf1-8505-8e2090fa5da0","Type":"ContainerDied","Data":"7bf274f0e4b77b2137ee5eb836bd3b33cec8458c6771bf5a499160300d476eda"} Jan 28 12:50:05 crc kubenswrapper[4685]: I0128 12:50:05.951932 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-2" podUID="edce2723-cc1f-4fc1-b42d-15a02e81d4b9" containerName="glance-api" containerID="cri-o://361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29" gracePeriod=30 Jan 28 12:50:05 crc kubenswrapper[4685]: I0128 12:50:05.951984 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-2" podUID="edce2723-cc1f-4fc1-b42d-15a02e81d4b9" containerName="glance-httpd" containerID="cri-o://9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed" gracePeriod=30 Jan 28 12:50:05 crc kubenswrapper[4685]: I0128 12:50:05.952072 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-2" podUID="edce2723-cc1f-4fc1-b42d-15a02e81d4b9" containerName="glance-log" containerID="cri-o://30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8" gracePeriod=30 Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.771049 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.867438 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-run\") pod \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.867538 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-var-locks-brick\") pod \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.867560 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-dev\") pod \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.867589 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-config-data\") pod \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.867609 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-logs\") pod \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.867636 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-sys\") pod \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.867651 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-etc-iscsi\") pod \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.867661 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "edce2723-cc1f-4fc1-b42d-15a02e81d4b9" (UID: "edce2723-cc1f-4fc1-b42d-15a02e81d4b9"). InnerVolumeSpecName "var-locks-brick". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.867696 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.867720 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-scripts\") pod \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.867722 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-run" (OuterVolumeSpecName: "run") pod "edce2723-cc1f-4fc1-b42d-15a02e81d4b9" (UID: "edce2723-cc1f-4fc1-b42d-15a02e81d4b9"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.867746 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.867761 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-lib-modules\") pod \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.867784 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-etc-nvme\") pod \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.867803 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-httpd-run\") pod \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.867849 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-dev" (OuterVolumeSpecName: "dev") pod "edce2723-cc1f-4fc1-b42d-15a02e81d4b9" (UID: "edce2723-cc1f-4fc1-b42d-15a02e81d4b9"). InnerVolumeSpecName "dev". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.867874 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4fbhq\" (UniqueName: \"kubernetes.io/projected/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-kube-api-access-4fbhq\") pod \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\" (UID: \"edce2723-cc1f-4fc1-b42d-15a02e81d4b9\") " Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.868152 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-logs" (OuterVolumeSpecName: "logs") pod "edce2723-cc1f-4fc1-b42d-15a02e81d4b9" (UID: "edce2723-cc1f-4fc1-b42d-15a02e81d4b9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.868214 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-sys" (OuterVolumeSpecName: "sys") pod "edce2723-cc1f-4fc1-b42d-15a02e81d4b9" (UID: "edce2723-cc1f-4fc1-b42d-15a02e81d4b9"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.868241 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "edce2723-cc1f-4fc1-b42d-15a02e81d4b9" (UID: "edce2723-cc1f-4fc1-b42d-15a02e81d4b9"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.868275 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "edce2723-cc1f-4fc1-b42d-15a02e81d4b9" (UID: "edce2723-cc1f-4fc1-b42d-15a02e81d4b9"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.868300 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "edce2723-cc1f-4fc1-b42d-15a02e81d4b9" (UID: "edce2723-cc1f-4fc1-b42d-15a02e81d4b9"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.868577 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "edce2723-cc1f-4fc1-b42d-15a02e81d4b9" (UID: "edce2723-cc1f-4fc1-b42d-15a02e81d4b9"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.868871 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.868890 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.868899 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.868907 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.868916 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.868926 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.868934 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.868941 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.868948 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.873625 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-kube-api-access-4fbhq" (OuterVolumeSpecName: "kube-api-access-4fbhq") pod "edce2723-cc1f-4fc1-b42d-15a02e81d4b9" (UID: "edce2723-cc1f-4fc1-b42d-15a02e81d4b9"). InnerVolumeSpecName "kube-api-access-4fbhq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.873771 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance-cache") pod "edce2723-cc1f-4fc1-b42d-15a02e81d4b9" (UID: "edce2723-cc1f-4fc1-b42d-15a02e81d4b9"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.874369 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "edce2723-cc1f-4fc1-b42d-15a02e81d4b9" (UID: "edce2723-cc1f-4fc1-b42d-15a02e81d4b9"). 
InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.879022 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-scripts" (OuterVolumeSpecName: "scripts") pod "edce2723-cc1f-4fc1-b42d-15a02e81d4b9" (UID: "edce2723-cc1f-4fc1-b42d-15a02e81d4b9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.956374 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-config-data" (OuterVolumeSpecName: "config-data") pod "edce2723-cc1f-4fc1-b42d-15a02e81d4b9" (UID: "edce2723-cc1f-4fc1-b42d-15a02e81d4b9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.964706 4685 generic.go:334] "Generic (PLEG): container finished" podID="edce2723-cc1f-4fc1-b42d-15a02e81d4b9" containerID="361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29" exitCode=0 Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.964738 4685 generic.go:334] "Generic (PLEG): container finished" podID="edce2723-cc1f-4fc1-b42d-15a02e81d4b9" containerID="9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed" exitCode=0 Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.964749 4685 generic.go:334] "Generic (PLEG): container finished" podID="edce2723-cc1f-4fc1-b42d-15a02e81d4b9" containerID="30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8" exitCode=143 Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.964768 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.964807 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-2" event={"ID":"edce2723-cc1f-4fc1-b42d-15a02e81d4b9","Type":"ContainerDied","Data":"361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29"} Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.964837 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-2" event={"ID":"edce2723-cc1f-4fc1-b42d-15a02e81d4b9","Type":"ContainerDied","Data":"9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed"} Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.964849 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-2" event={"ID":"edce2723-cc1f-4fc1-b42d-15a02e81d4b9","Type":"ContainerDied","Data":"30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8"} Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.964865 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-2" event={"ID":"edce2723-cc1f-4fc1-b42d-15a02e81d4b9","Type":"ContainerDied","Data":"c54dbd5d78bbd561a607871f340674510b6636e6176238ed5f4d8b4bb7c7882f"} Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.964887 4685 scope.go:117] "RemoveContainer" containerID="361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.968881 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-2" podUID="3b051e6c-2e8b-4ee7-b6e5-544f67c66526" containerName="glance-log" containerID="cri-o://1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e" gracePeriod=30 Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.969353 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2rtsx" event={"ID":"15ce0167-5fd5-4cf1-8505-8e2090fa5da0","Type":"ContainerStarted","Data":"97bca1bec0fb04ec9e47b41e078c2a52c0a4504d0077496406b285924f8fa6f8"} Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.969425 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-2" podUID="3b051e6c-2e8b-4ee7-b6e5-544f67c66526" containerName="glance-httpd" containerID="cri-o://84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74" gracePeriod=30 Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.969425 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-2" podUID="3b051e6c-2e8b-4ee7-b6e5-544f67c66526" containerName="glance-api" containerID="cri-o://9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9" gracePeriod=30 Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.969510 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-1" podUID="05e1a5a6-7b96-47cb-9221-df4f61a49880" containerName="glance-log" containerID="cri-o://8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f" gracePeriod=30 Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.969627 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-1" 
podUID="05e1a5a6-7b96-47cb-9221-df4f61a49880" containerName="glance-api" containerID="cri-o://30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3" gracePeriod=30 Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.969671 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-1" podUID="05e1a5a6-7b96-47cb-9221-df4f61a49880" containerName="glance-httpd" containerID="cri-o://d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e" gracePeriod=30 Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.969862 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-1" podUID="f2ae8567-d50d-4940-8573-37c5cb1d1b8a" containerName="glance-log" containerID="cri-o://fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d" gracePeriod=30 Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.969909 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-1" podUID="f2ae8567-d50d-4940-8573-37c5cb1d1b8a" containerName="glance-httpd" containerID="cri-o://f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60" gracePeriod=30 Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.969955 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-1" podUID="f2ae8567-d50d-4940-8573-37c5cb1d1b8a" containerName="glance-api" containerID="cri-o://e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3" gracePeriod=30 Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.970254 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4fbhq\" (UniqueName: \"kubernetes.io/projected/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-kube-api-access-4fbhq\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.970277 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.970306 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.970322 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/edce2723-cc1f-4fc1-b42d-15a02e81d4b9-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.970338 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.995025 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Jan 28 12:50:06 crc kubenswrapper[4685]: I0128 12:50:06.999008 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2rtsx" podStartSLOduration=2.452065337 podStartE2EDuration="3.998986497s" podCreationTimestamp="2026-01-28 12:50:03 +0000 UTC" firstStartedPulling="2026-01-28 12:50:04.942951134 +0000 UTC 
m=+1756.030364969" lastFinishedPulling="2026-01-28 12:50:06.489872294 +0000 UTC m=+1757.577286129" observedRunningTime="2026-01-28 12:50:06.998684809 +0000 UTC m=+1758.086098654" watchObservedRunningTime="2026-01-28 12:50:06.998986497 +0000 UTC m=+1758.086400332" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.016226 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.021353 4685 scope.go:117] "RemoveContainer" containerID="9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.028222 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-2"] Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.045415 4685 scope.go:117] "RemoveContainer" containerID="30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.047286 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-2"] Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.062747 4685 scope.go:117] "RemoveContainer" containerID="361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29" Jan 28 12:50:07 crc kubenswrapper[4685]: E0128 12:50:07.063719 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29\": container with ID starting with 361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29 not found: ID does not exist" containerID="361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.063761 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29"} err="failed to get container status \"361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29\": rpc error: code = NotFound desc = could not find container \"361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29\": container with ID starting with 361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29 not found: ID does not exist" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.063788 4685 scope.go:117] "RemoveContainer" containerID="9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed" Jan 28 12:50:07 crc kubenswrapper[4685]: E0128 12:50:07.064126 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed\": container with ID starting with 9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed not found: ID does not exist" containerID="9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.066282 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed"} err="failed to get container status \"9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed\": rpc error: code = NotFound desc = could not find container \"9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed\": 
container with ID starting with 9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed not found: ID does not exist" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.066341 4685 scope.go:117] "RemoveContainer" containerID="30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8" Jan 28 12:50:07 crc kubenswrapper[4685]: E0128 12:50:07.066913 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8\": container with ID starting with 30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8 not found: ID does not exist" containerID="30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.066956 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8"} err="failed to get container status \"30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8\": rpc error: code = NotFound desc = could not find container \"30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8\": container with ID starting with 30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8 not found: ID does not exist" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.066986 4685 scope.go:117] "RemoveContainer" containerID="361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.067469 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29"} err="failed to get container status \"361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29\": rpc error: code = NotFound desc = could not find container \"361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29\": container with ID starting with 361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29 not found: ID does not exist" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.067499 4685 scope.go:117] "RemoveContainer" containerID="9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.067825 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed"} err="failed to get container status \"9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed\": rpc error: code = NotFound desc = could not find container \"9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed\": container with ID starting with 9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed not found: ID does not exist" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.067852 4685 scope.go:117] "RemoveContainer" containerID="30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.068147 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8"} err="failed to get container status \"30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8\": rpc error: code = NotFound desc = could not find container \"30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8\": 
container with ID starting with 30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8 not found: ID does not exist" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.068187 4685 scope.go:117] "RemoveContainer" containerID="361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.068479 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29"} err="failed to get container status \"361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29\": rpc error: code = NotFound desc = could not find container \"361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29\": container with ID starting with 361953c826cc86906e409bb9b5e9b365354969406af6f35ef8b6c040e11a7e29 not found: ID does not exist" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.068503 4685 scope.go:117] "RemoveContainer" containerID="9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.069161 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed"} err="failed to get container status \"9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed\": rpc error: code = NotFound desc = could not find container \"9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed\": container with ID starting with 9fa0f723dcb932fbb3132a36a782455b0f9ae602aa6173d982ce63132b70b5ed not found: ID does not exist" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.069198 4685 scope.go:117] "RemoveContainer" containerID="30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.070398 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8"} err="failed to get container status \"30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8\": rpc error: code = NotFound desc = could not find container \"30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8\": container with ID starting with 30c3c80086fc98cec16e5e1151ba0fe3982a86d6dfea4e192f364fbc327dc3e8 not found: ID does not exist" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.071585 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.071624 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.761884 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.890636 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-etc-nvme\") pod \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.890690 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-lib-modules\") pod \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.890735 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-config-data\") pod \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.890753 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.890772 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.890827 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7548\" (UniqueName: \"kubernetes.io/projected/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-kube-api-access-v7548\") pod \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.890870 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-dev\") pod \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.890904 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-run\") pod \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.890934 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-httpd-run\") pod \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.890989 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-etc-iscsi\") pod \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.891007 4685 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-var-locks-brick\") pod \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.891033 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-sys\") pod \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.891051 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-logs\") pod \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.891064 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-scripts\") pod \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\" (UID: \"f2ae8567-d50d-4940-8573-37c5cb1d1b8a\") " Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.892201 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-dev" (OuterVolumeSpecName: "dev") pod "f2ae8567-d50d-4940-8573-37c5cb1d1b8a" (UID: "f2ae8567-d50d-4940-8573-37c5cb1d1b8a"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.892246 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "f2ae8567-d50d-4940-8573-37c5cb1d1b8a" (UID: "f2ae8567-d50d-4940-8573-37c5cb1d1b8a"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.892265 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "f2ae8567-d50d-4940-8573-37c5cb1d1b8a" (UID: "f2ae8567-d50d-4940-8573-37c5cb1d1b8a"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.896308 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "f2ae8567-d50d-4940-8573-37c5cb1d1b8a" (UID: "f2ae8567-d50d-4940-8573-37c5cb1d1b8a"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.896350 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "f2ae8567-d50d-4940-8573-37c5cb1d1b8a" (UID: "f2ae8567-d50d-4940-8573-37c5cb1d1b8a"). InnerVolumeSpecName "var-locks-brick". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.896389 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-run" (OuterVolumeSpecName: "run") pod "f2ae8567-d50d-4940-8573-37c5cb1d1b8a" (UID: "f2ae8567-d50d-4940-8573-37c5cb1d1b8a"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.896316 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-sys" (OuterVolumeSpecName: "sys") pod "f2ae8567-d50d-4940-8573-37c5cb1d1b8a" (UID: "f2ae8567-d50d-4940-8573-37c5cb1d1b8a"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.896819 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "f2ae8567-d50d-4940-8573-37c5cb1d1b8a" (UID: "f2ae8567-d50d-4940-8573-37c5cb1d1b8a"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.896886 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-logs" (OuterVolumeSpecName: "logs") pod "f2ae8567-d50d-4940-8573-37c5cb1d1b8a" (UID: "f2ae8567-d50d-4940-8573-37c5cb1d1b8a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.897612 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-scripts" (OuterVolumeSpecName: "scripts") pod "f2ae8567-d50d-4940-8573-37c5cb1d1b8a" (UID: "f2ae8567-d50d-4940-8573-37c5cb1d1b8a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.901956 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance-cache") pod "f2ae8567-d50d-4940-8573-37c5cb1d1b8a" (UID: "f2ae8567-d50d-4940-8573-37c5cb1d1b8a"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.905486 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "f2ae8567-d50d-4940-8573-37c5cb1d1b8a" (UID: "f2ae8567-d50d-4940-8573-37c5cb1d1b8a"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.908158 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-kube-api-access-v7548" (OuterVolumeSpecName: "kube-api-access-v7548") pod "f2ae8567-d50d-4940-8573-37c5cb1d1b8a" (UID: "f2ae8567-d50d-4940-8573-37c5cb1d1b8a"). InnerVolumeSpecName "kube-api-access-v7548". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.956523 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.987123 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.992786 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7548\" (UniqueName: \"kubernetes.io/projected/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-kube-api-access-v7548\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.992815 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.992824 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.992833 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.992842 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.992851 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.992863 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.992878 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.992887 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.992896 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.992905 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.992929 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Jan 28 12:50:07 crc kubenswrapper[4685]: I0128 12:50:07.992944 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.015411 4685 generic.go:334] "Generic (PLEG): container finished" podID="05e1a5a6-7b96-47cb-9221-df4f61a49880" containerID="30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3" exitCode=0 Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.015463 4685 generic.go:334] "Generic (PLEG): container finished" podID="05e1a5a6-7b96-47cb-9221-df4f61a49880" containerID="d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e" exitCode=0 Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.015475 4685 generic.go:334] "Generic (PLEG): container finished" podID="05e1a5a6-7b96-47cb-9221-df4f61a49880" containerID="8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f" exitCode=143 Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.015582 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"05e1a5a6-7b96-47cb-9221-df4f61a49880","Type":"ContainerDied","Data":"30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3"} Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.015641 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"05e1a5a6-7b96-47cb-9221-df4f61a49880","Type":"ContainerDied","Data":"d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e"} Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.015657 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"05e1a5a6-7b96-47cb-9221-df4f61a49880","Type":"ContainerDied","Data":"8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f"} Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.015670 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"05e1a5a6-7b96-47cb-9221-df4f61a49880","Type":"ContainerDied","Data":"1c418455d3b255ad9ce11a17b314fce13383955f79bae5c9b1ead658a711c9b4"} Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.015694 4685 scope.go:117] "RemoveContainer" containerID="30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.015948 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.024783 4685 generic.go:334] "Generic (PLEG): container finished" podID="f2ae8567-d50d-4940-8573-37c5cb1d1b8a" containerID="e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3" exitCode=0 Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.024828 4685 generic.go:334] "Generic (PLEG): container finished" podID="f2ae8567-d50d-4940-8573-37c5cb1d1b8a" containerID="f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60" exitCode=0 Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.024842 4685 generic.go:334] "Generic (PLEG): container finished" podID="f2ae8567-d50d-4940-8573-37c5cb1d1b8a" containerID="fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d" exitCode=143 Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.024910 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-1" event={"ID":"f2ae8567-d50d-4940-8573-37c5cb1d1b8a","Type":"ContainerDied","Data":"e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3"} Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.024942 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-1" event={"ID":"f2ae8567-d50d-4940-8573-37c5cb1d1b8a","Type":"ContainerDied","Data":"f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60"} Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.024953 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-1" event={"ID":"f2ae8567-d50d-4940-8573-37c5cb1d1b8a","Type":"ContainerDied","Data":"fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d"} Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.024971 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-1" event={"ID":"f2ae8567-d50d-4940-8573-37c5cb1d1b8a","Type":"ContainerDied","Data":"a480b2ded74a0181a87c1528260c0ab269c7d43249773ddf17a321e4b50a7506"} Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.025077 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.042325 4685 generic.go:334] "Generic (PLEG): container finished" podID="3b051e6c-2e8b-4ee7-b6e5-544f67c66526" containerID="9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9" exitCode=0 Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.042423 4685 generic.go:334] "Generic (PLEG): container finished" podID="3b051e6c-2e8b-4ee7-b6e5-544f67c66526" containerID="84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74" exitCode=0 Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.042432 4685 generic.go:334] "Generic (PLEG): container finished" podID="3b051e6c-2e8b-4ee7-b6e5-544f67c66526" containerID="1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e" exitCode=143 Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.043751 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.043997 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-2" event={"ID":"3b051e6c-2e8b-4ee7-b6e5-544f67c66526","Type":"ContainerDied","Data":"9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9"} Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.044083 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-2" event={"ID":"3b051e6c-2e8b-4ee7-b6e5-544f67c66526","Type":"ContainerDied","Data":"84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74"} Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.044100 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-2" event={"ID":"3b051e6c-2e8b-4ee7-b6e5-544f67c66526","Type":"ContainerDied","Data":"1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e"} Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.044111 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-2" event={"ID":"3b051e6c-2e8b-4ee7-b6e5-544f67c66526","Type":"ContainerDied","Data":"8b4119ad9dc29e161a25a8c2cf9052f85e52d6abc2f6d266015d7b11f7bba870"} Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.050550 4685 scope.go:117] "RemoveContainer" containerID="d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.066755 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.067279 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-config-data" (OuterVolumeSpecName: "config-data") pod "f2ae8567-d50d-4940-8573-37c5cb1d1b8a" (UID: "f2ae8567-d50d-4940-8573-37c5cb1d1b8a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.085013 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.090621 4685 scope.go:117] "RemoveContainer" containerID="8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.094911 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-etc-iscsi\") pod \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095020 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-scripts\") pod \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095055 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-dev\") pod \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095053 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "3b051e6c-2e8b-4ee7-b6e5-544f67c66526" (UID: "3b051e6c-2e8b-4ee7-b6e5-544f67c66526"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095079 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-lib-modules\") pod \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095113 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-etc-nvme\") pod \"05e1a5a6-7b96-47cb-9221-df4f61a49880\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095118 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-dev" (OuterVolumeSpecName: "dev") pod "3b051e6c-2e8b-4ee7-b6e5-544f67c66526" (UID: "3b051e6c-2e8b-4ee7-b6e5-544f67c66526"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095147 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "3b051e6c-2e8b-4ee7-b6e5-544f67c66526" (UID: "3b051e6c-2e8b-4ee7-b6e5-544f67c66526"). InnerVolumeSpecName "lib-modules". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095154 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-var-locks-brick\") pod \"05e1a5a6-7b96-47cb-9221-df4f61a49880\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095194 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "05e1a5a6-7b96-47cb-9221-df4f61a49880" (UID: "05e1a5a6-7b96-47cb-9221-df4f61a49880"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095199 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-run\") pod \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095252 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-run" (OuterVolumeSpecName: "run") pod "3b051e6c-2e8b-4ee7-b6e5-544f67c66526" (UID: "3b051e6c-2e8b-4ee7-b6e5-544f67c66526"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095268 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ndlmq\" (UniqueName: \"kubernetes.io/projected/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-kube-api-access-ndlmq\") pod \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095295 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "05e1a5a6-7b96-47cb-9221-df4f61a49880" (UID: "05e1a5a6-7b96-47cb-9221-df4f61a49880"). InnerVolumeSpecName "var-locks-brick". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095304 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05e1a5a6-7b96-47cb-9221-df4f61a49880-config-data\") pod \"05e1a5a6-7b96-47cb-9221-df4f61a49880\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095352 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-dev\") pod \"05e1a5a6-7b96-47cb-9221-df4f61a49880\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095377 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095410 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/05e1a5a6-7b96-47cb-9221-df4f61a49880-logs\") pod \"05e1a5a6-7b96-47cb-9221-df4f61a49880\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095445 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-lib-modules\") pod \"05e1a5a6-7b96-47cb-9221-df4f61a49880\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095449 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-dev" (OuterVolumeSpecName: "dev") pod "05e1a5a6-7b96-47cb-9221-df4f61a49880" (UID: "05e1a5a6-7b96-47cb-9221-df4f61a49880"). InnerVolumeSpecName "dev". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095469 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-httpd-run\") pod \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095528 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") pod \"05e1a5a6-7b96-47cb-9221-df4f61a49880\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095555 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-etc-nvme\") pod \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095581 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-etc-iscsi\") pod \"05e1a5a6-7b96-47cb-9221-df4f61a49880\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095624 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wd5vk\" (UniqueName: \"kubernetes.io/projected/05e1a5a6-7b96-47cb-9221-df4f61a49880-kube-api-access-wd5vk\") pod \"05e1a5a6-7b96-47cb-9221-df4f61a49880\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095640 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"05e1a5a6-7b96-47cb-9221-df4f61a49880\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095667 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05e1a5a6-7b96-47cb-9221-df4f61a49880-scripts\") pod \"05e1a5a6-7b96-47cb-9221-df4f61a49880\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095695 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-sys\") pod \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095712 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-sys\") pod \"05e1a5a6-7b96-47cb-9221-df4f61a49880\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095733 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-run\") pod \"05e1a5a6-7b96-47cb-9221-df4f61a49880\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095739 4685 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/empty-dir/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "3b051e6c-2e8b-4ee7-b6e5-544f67c66526" (UID: "3b051e6c-2e8b-4ee7-b6e5-544f67c66526"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095749 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-var-locks-brick\") pod \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095768 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "3b051e6c-2e8b-4ee7-b6e5-544f67c66526" (UID: "3b051e6c-2e8b-4ee7-b6e5-544f67c66526"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095850 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-config-data\") pod \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095882 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/05e1a5a6-7b96-47cb-9221-df4f61a49880-httpd-run\") pod \"05e1a5a6-7b96-47cb-9221-df4f61a49880\" (UID: \"05e1a5a6-7b96-47cb-9221-df4f61a49880\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095931 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-logs\") pod \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095948 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05e1a5a6-7b96-47cb-9221-df4f61a49880-logs" (OuterVolumeSpecName: "logs") pod "05e1a5a6-7b96-47cb-9221-df4f61a49880" (UID: "05e1a5a6-7b96-47cb-9221-df4f61a49880"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.095964 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage18-crc\") pod \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\" (UID: \"3b051e6c-2e8b-4ee7-b6e5-544f67c66526\") " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.096689 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.096712 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/05e1a5a6-7b96-47cb-9221-df4f61a49880-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.096724 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.096734 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.096744 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.096754 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2ae8567-d50d-4940-8573-37c5cb1d1b8a-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.096766 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.096777 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.096792 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.096802 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.096812 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.096823 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.096833 4685 reconciler_common.go:293] "Volume detached for 
volume \"run\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.096762 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-logs" (OuterVolumeSpecName: "logs") pod "3b051e6c-2e8b-4ee7-b6e5-544f67c66526" (UID: "3b051e6c-2e8b-4ee7-b6e5-544f67c66526"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.096835 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "05e1a5a6-7b96-47cb-9221-df4f61a49880" (UID: "05e1a5a6-7b96-47cb-9221-df4f61a49880"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.096890 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-sys" (OuterVolumeSpecName: "sys") pod "05e1a5a6-7b96-47cb-9221-df4f61a49880" (UID: "05e1a5a6-7b96-47cb-9221-df4f61a49880"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.096915 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "3b051e6c-2e8b-4ee7-b6e5-544f67c66526" (UID: "3b051e6c-2e8b-4ee7-b6e5-544f67c66526"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.097048 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05e1a5a6-7b96-47cb-9221-df4f61a49880-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "05e1a5a6-7b96-47cb-9221-df4f61a49880" (UID: "05e1a5a6-7b96-47cb-9221-df4f61a49880"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.097092 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "05e1a5a6-7b96-47cb-9221-df4f61a49880" (UID: "05e1a5a6-7b96-47cb-9221-df4f61a49880"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.097121 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-run" (OuterVolumeSpecName: "run") pod "05e1a5a6-7b96-47cb-9221-df4f61a49880" (UID: "05e1a5a6-7b96-47cb-9221-df4f61a49880"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.099959 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage17-crc" (OuterVolumeSpecName: "glance") pod "05e1a5a6-7b96-47cb-9221-df4f61a49880" (UID: "05e1a5a6-7b96-47cb-9221-df4f61a49880"). InnerVolumeSpecName "local-storage17-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.100566 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-sys" (OuterVolumeSpecName: "sys") pod "3b051e6c-2e8b-4ee7-b6e5-544f67c66526" (UID: "3b051e6c-2e8b-4ee7-b6e5-544f67c66526"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.101005 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05e1a5a6-7b96-47cb-9221-df4f61a49880-kube-api-access-wd5vk" (OuterVolumeSpecName: "kube-api-access-wd5vk") pod "05e1a5a6-7b96-47cb-9221-df4f61a49880" (UID: "05e1a5a6-7b96-47cb-9221-df4f61a49880"). InnerVolumeSpecName "kube-api-access-wd5vk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.101901 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-scripts" (OuterVolumeSpecName: "scripts") pod "3b051e6c-2e8b-4ee7-b6e5-544f67c66526" (UID: "3b051e6c-2e8b-4ee7-b6e5-544f67c66526"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.102848 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage13-crc" (OuterVolumeSpecName: "glance-cache") pod "05e1a5a6-7b96-47cb-9221-df4f61a49880" (UID: "05e1a5a6-7b96-47cb-9221-df4f61a49880"). InnerVolumeSpecName "local-storage13-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.104642 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05e1a5a6-7b96-47cb-9221-df4f61a49880-scripts" (OuterVolumeSpecName: "scripts") pod "05e1a5a6-7b96-47cb-9221-df4f61a49880" (UID: "05e1a5a6-7b96-47cb-9221-df4f61a49880"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.105326 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "3b051e6c-2e8b-4ee7-b6e5-544f67c66526" (UID: "3b051e6c-2e8b-4ee7-b6e5-544f67c66526"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.108411 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-kube-api-access-ndlmq" (OuterVolumeSpecName: "kube-api-access-ndlmq") pod "3b051e6c-2e8b-4ee7-b6e5-544f67c66526" (UID: "3b051e6c-2e8b-4ee7-b6e5-544f67c66526"). InnerVolumeSpecName "kube-api-access-ndlmq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.120205 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage18-crc" (OuterVolumeSpecName: "glance-cache") pod "3b051e6c-2e8b-4ee7-b6e5-544f67c66526" (UID: "3b051e6c-2e8b-4ee7-b6e5-544f67c66526"). InnerVolumeSpecName "local-storage18-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.123865 4685 scope.go:117] "RemoveContainer" containerID="30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3" Jan 28 12:50:08 crc kubenswrapper[4685]: E0128 12:50:08.125535 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3\": container with ID starting with 30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3 not found: ID does not exist" containerID="30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.125575 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3"} err="failed to get container status \"30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3\": rpc error: code = NotFound desc = could not find container \"30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3\": container with ID starting with 30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3 not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.125597 4685 scope.go:117] "RemoveContainer" containerID="d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e" Jan 28 12:50:08 crc kubenswrapper[4685]: E0128 12:50:08.126052 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e\": container with ID starting with d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e not found: ID does not exist" containerID="d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.126090 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e"} err="failed to get container status \"d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e\": rpc error: code = NotFound desc = could not find container \"d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e\": container with ID starting with d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.126116 4685 scope.go:117] "RemoveContainer" containerID="8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f" Jan 28 12:50:08 crc kubenswrapper[4685]: E0128 12:50:08.126415 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f\": container with ID starting with 8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f not found: ID does not exist" containerID="8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.126437 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f"} err="failed to get container status \"8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f\": rpc error: code = NotFound desc = could 
not find container \"8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f\": container with ID starting with 8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.126450 4685 scope.go:117] "RemoveContainer" containerID="30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.126736 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3"} err="failed to get container status \"30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3\": rpc error: code = NotFound desc = could not find container \"30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3\": container with ID starting with 30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3 not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.126762 4685 scope.go:117] "RemoveContainer" containerID="d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.126934 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e"} err="failed to get container status \"d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e\": rpc error: code = NotFound desc = could not find container \"d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e\": container with ID starting with d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.126951 4685 scope.go:117] "RemoveContainer" containerID="8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.127105 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f"} err="failed to get container status \"8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f\": rpc error: code = NotFound desc = could not find container \"8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f\": container with ID starting with 8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.127124 4685 scope.go:117] "RemoveContainer" containerID="30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.127304 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3"} err="failed to get container status \"30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3\": rpc error: code = NotFound desc = could not find container \"30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3\": container with ID starting with 30045553997ea4d279536e409248b21b9eb186e47801f997f008c1a147d5a3e3 not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.127319 4685 scope.go:117] "RemoveContainer" containerID="d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.127471 4685 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e"} err="failed to get container status \"d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e\": rpc error: code = NotFound desc = could not find container \"d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e\": container with ID starting with d715e5a67e947f39655c6f960f5ea49d783db9c6f19cefab0bce02f04115c50e not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.127487 4685 scope.go:117] "RemoveContainer" containerID="8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.127637 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f"} err="failed to get container status \"8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f\": rpc error: code = NotFound desc = could not find container \"8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f\": container with ID starting with 8b32db8a609d74dc341636bce8ade3b90389d1e4a86d17ed2615b3e6ba0a354f not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.127655 4685 scope.go:117] "RemoveContainer" containerID="e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.158463 4685 scope.go:117] "RemoveContainer" containerID="f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.198076 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wd5vk\" (UniqueName: \"kubernetes.io/projected/05e1a5a6-7b96-47cb-9221-df4f61a49880-kube-api-access-wd5vk\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.198117 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") on node \"crc\" " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.198128 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05e1a5a6-7b96-47cb-9221-df4f61a49880-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.198139 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.198147 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.198155 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.198165 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/05e1a5a6-7b96-47cb-9221-df4f61a49880-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.198192 4685 reconciler_common.go:293] "Volume detached for 
volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.198203 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage18-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage18-crc\") on node \"crc\" " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.198211 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.198219 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ndlmq\" (UniqueName: \"kubernetes.io/projected/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-kube-api-access-ndlmq\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.198230 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.198238 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.198250 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage13-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") on node \"crc\" " Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.198258 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.198266 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/05e1a5a6-7b96-47cb-9221-df4f61a49880-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.216157 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-config-data" (OuterVolumeSpecName: "config-data") pod "3b051e6c-2e8b-4ee7-b6e5-544f67c66526" (UID: "3b051e6c-2e8b-4ee7-b6e5-544f67c66526"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.227695 4685 scope.go:117] "RemoveContainer" containerID="fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.234103 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05e1a5a6-7b96-47cb-9221-df4f61a49880-config-data" (OuterVolumeSpecName: "config-data") pod "05e1a5a6-7b96-47cb-9221-df4f61a49880" (UID: "05e1a5a6-7b96-47cb-9221-df4f61a49880"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.241325 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.249102 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage13-crc" (UniqueName: "kubernetes.io/local-volume/local-storage13-crc") on node "crc" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.252099 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage17-crc" (UniqueName: "kubernetes.io/local-volume/local-storage17-crc") on node "crc" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.266828 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage18-crc" (UniqueName: "kubernetes.io/local-volume/local-storage18-crc") on node "crc" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.275737 4685 scope.go:117] "RemoveContainer" containerID="e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3" Jan 28 12:50:08 crc kubenswrapper[4685]: E0128 12:50:08.276203 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3\": container with ID starting with e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3 not found: ID does not exist" containerID="e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.276241 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3"} err="failed to get container status \"e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3\": rpc error: code = NotFound desc = could not find container \"e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3\": container with ID starting with e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3 not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.276264 4685 scope.go:117] "RemoveContainer" containerID="f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60" Jan 28 12:50:08 crc kubenswrapper[4685]: E0128 12:50:08.276512 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60\": container with ID starting with f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60 not found: ID does not exist" containerID="f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.276533 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60"} err="failed to get container status \"f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60\": rpc error: code = NotFound desc = could not find container \"f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60\": container with ID starting with f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60 not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.276547 4685 scope.go:117] "RemoveContainer" 
containerID="fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d" Jan 28 12:50:08 crc kubenswrapper[4685]: E0128 12:50:08.276776 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d\": container with ID starting with fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d not found: ID does not exist" containerID="fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.276798 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d"} err="failed to get container status \"fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d\": rpc error: code = NotFound desc = could not find container \"fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d\": container with ID starting with fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.276812 4685 scope.go:117] "RemoveContainer" containerID="e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.277064 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3"} err="failed to get container status \"e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3\": rpc error: code = NotFound desc = could not find container \"e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3\": container with ID starting with e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3 not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.277090 4685 scope.go:117] "RemoveContainer" containerID="f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.277308 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60"} err="failed to get container status \"f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60\": rpc error: code = NotFound desc = could not find container \"f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60\": container with ID starting with f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60 not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.277327 4685 scope.go:117] "RemoveContainer" containerID="fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.277552 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d"} err="failed to get container status \"fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d\": rpc error: code = NotFound desc = could not find container \"fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d\": container with ID starting with fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.277612 4685 scope.go:117] "RemoveContainer" 
containerID="e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.277802 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3"} err="failed to get container status \"e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3\": rpc error: code = NotFound desc = could not find container \"e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3\": container with ID starting with e8d8d590a55f273d3f9dc6f162b668750042ebf4b4006fd23a1941173ca021d3 not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.277843 4685 scope.go:117] "RemoveContainer" containerID="f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.278036 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60"} err="failed to get container status \"f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60\": rpc error: code = NotFound desc = could not find container \"f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60\": container with ID starting with f5276411704d1cba42a9fcbfa2a6fba47742a795282cf5f871176e75f5008e60 not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.278097 4685 scope.go:117] "RemoveContainer" containerID="fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.278381 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d"} err="failed to get container status \"fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d\": rpc error: code = NotFound desc = could not find container \"fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d\": container with ID starting with fc6def7d815eab4509001df1252a7fc1aa42e85050d5b4e960c4ce0788b18a7d not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.278416 4685 scope.go:117] "RemoveContainer" containerID="9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.302672 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.302717 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b051e6c-2e8b-4ee7-b6e5-544f67c66526-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.302754 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage18-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage18-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.302767 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05e1a5a6-7b96-47cb-9221-df4f61a49880-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.302780 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" 
(UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.302789 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage13-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.316432 4685 scope.go:117] "RemoveContainer" containerID="84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.353722 4685 scope.go:117] "RemoveContainer" containerID="1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.355646 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.368266 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.382556 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-1"] Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.384287 4685 scope.go:117] "RemoveContainer" containerID="9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9" Jan 28 12:50:08 crc kubenswrapper[4685]: E0128 12:50:08.384844 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9\": container with ID starting with 9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9 not found: ID does not exist" containerID="9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.384998 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9"} err="failed to get container status \"9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9\": rpc error: code = NotFound desc = could not find container \"9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9\": container with ID starting with 9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9 not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.385082 4685 scope.go:117] "RemoveContainer" containerID="84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74" Jan 28 12:50:08 crc kubenswrapper[4685]: E0128 12:50:08.385505 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74\": container with ID starting with 84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74 not found: ID does not exist" containerID="84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.385533 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74"} err="failed to get container status \"84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74\": rpc error: code = NotFound desc = could not find container 
\"84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74\": container with ID starting with 84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74 not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.385556 4685 scope.go:117] "RemoveContainer" containerID="1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e" Jan 28 12:50:08 crc kubenswrapper[4685]: E0128 12:50:08.386025 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e\": container with ID starting with 1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e not found: ID does not exist" containerID="1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.386081 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e"} err="failed to get container status \"1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e\": rpc error: code = NotFound desc = could not find container \"1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e\": container with ID starting with 1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.386114 4685 scope.go:117] "RemoveContainer" containerID="9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.387045 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9"} err="failed to get container status \"9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9\": rpc error: code = NotFound desc = could not find container \"9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9\": container with ID starting with 9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9 not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.387071 4685 scope.go:117] "RemoveContainer" containerID="84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.387554 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74"} err="failed to get container status \"84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74\": rpc error: code = NotFound desc = could not find container \"84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74\": container with ID starting with 84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74 not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.387575 4685 scope.go:117] "RemoveContainer" containerID="1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.388513 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e"} err="failed to get container status \"1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e\": rpc error: code = NotFound desc = could not find container 
\"1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e\": container with ID starting with 1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.388540 4685 scope.go:117] "RemoveContainer" containerID="9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.389125 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9"} err="failed to get container status \"9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9\": rpc error: code = NotFound desc = could not find container \"9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9\": container with ID starting with 9fbae00e48371329bc9e34a0cad3289ab16fac55a96bec4cac492273ec7bf5b9 not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.389148 4685 scope.go:117] "RemoveContainer" containerID="84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.390376 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74"} err="failed to get container status \"84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74\": rpc error: code = NotFound desc = could not find container \"84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74\": container with ID starting with 84b50dcfcb7ee86cfa629ed4c87e34a45c5cd76cf1656b55488e6da43327db74 not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.390485 4685 scope.go:117] "RemoveContainer" containerID="1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.390863 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e"} err="failed to get container status \"1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e\": rpc error: code = NotFound desc = could not find container \"1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e\": container with ID starting with 1891646ba51dd1aab7edb4d27ab10c90de307c1fb041620edfcef656849beb8e not found: ID does not exist" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.392226 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-1"] Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.402476 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-2"] Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.409039 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-2"] Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.566108 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05e1a5a6-7b96-47cb-9221-df4f61a49880" path="/var/lib/kubelet/pods/05e1a5a6-7b96-47cb-9221-df4f61a49880/volumes" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.567024 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b051e6c-2e8b-4ee7-b6e5-544f67c66526" path="/var/lib/kubelet/pods/3b051e6c-2e8b-4ee7-b6e5-544f67c66526/volumes" Jan 28 12:50:08 crc 
kubenswrapper[4685]: I0128 12:50:08.568293 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="edce2723-cc1f-4fc1-b42d-15a02e81d4b9" path="/var/lib/kubelet/pods/edce2723-cc1f-4fc1-b42d-15a02e81d4b9/volumes" Jan 28 12:50:08 crc kubenswrapper[4685]: I0128 12:50:08.569229 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2ae8567-d50d-4940-8573-37c5cb1d1b8a" path="/var/lib/kubelet/pods/f2ae8567-d50d-4940-8573-37c5cb1d1b8a/volumes" Jan 28 12:50:09 crc kubenswrapper[4685]: I0128 12:50:09.540835 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:50:09 crc kubenswrapper[4685]: I0128 12:50:09.541146 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-0" podUID="6d480af3-7335-4cde-8f1a-5e8c89339d61" containerName="glance-log" containerID="cri-o://fc9aa87ea3589a04c2928fbc5b9a37bfe28ffbba7e027fab51e773b2d656fad4" gracePeriod=30 Jan 28 12:50:09 crc kubenswrapper[4685]: I0128 12:50:09.541583 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-0" podUID="6d480af3-7335-4cde-8f1a-5e8c89339d61" containerName="glance-api" containerID="cri-o://1e1053d37523136fbb90c95478efe81e8c162e7130f6707635232a9e6179b2fa" gracePeriod=30 Jan 28 12:50:09 crc kubenswrapper[4685]: I0128 12:50:09.541603 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-0" podUID="6d480af3-7335-4cde-8f1a-5e8c89339d61" containerName="glance-httpd" containerID="cri-o://68d535a3f683da43afeed5c547fb0ba352bc33e76efa8810042dfd483dadca40" gracePeriod=30 Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.040680 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.041281 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-0" podUID="326f0b30-3e84-4b80-99c4-7fa87312efb2" containerName="glance-log" containerID="cri-o://78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271" gracePeriod=30 Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.041421 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-0" podUID="326f0b30-3e84-4b80-99c4-7fa87312efb2" containerName="glance-api" containerID="cri-o://6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953" gracePeriod=30 Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.041462 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-0" podUID="326f0b30-3e84-4b80-99c4-7fa87312efb2" containerName="glance-httpd" containerID="cri-o://19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd" gracePeriod=30 Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.063484 4685 generic.go:334] "Generic (PLEG): container finished" podID="6d480af3-7335-4cde-8f1a-5e8c89339d61" containerID="68d535a3f683da43afeed5c547fb0ba352bc33e76efa8810042dfd483dadca40" exitCode=0 Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.063565 4685 generic.go:334] "Generic (PLEG): container finished" podID="6d480af3-7335-4cde-8f1a-5e8c89339d61" containerID="fc9aa87ea3589a04c2928fbc5b9a37bfe28ffbba7e027fab51e773b2d656fad4" exitCode=143 
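[Editor's note] Two patterns in the entries above are worth decoding. First, the repeated "ContainerStatus from runtime service failed" / "DeleteContainer returned error ... NotFound" pairs look like the benign already-removed race: kubelet re-queries the runtime for a container that has just been deleted, CRI-O answers NotFound, and kubelet logs it and moves on (the teardown continues normally afterward). Second, in the "Killing container with a grace period" sequence, glance-log exits with exitCode=143, which is 128+SIGTERM(15) — it was terminated by the 30-second grace-period SIGTERM — while glance-httpd's exitCode=0 indicates a clean shutdown. The following is a minimal, illustrative Go sketch of the NotFound pattern only; it is not kubelet's actual code, and the helper names and the in-memory "removed" map are assumptions for the demonstration:

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for the gRPC codes.NotFound error the runtime
// returns once a container ID no longer exists.
var errNotFound = errors.New("rpc error: code = NotFound desc = container not found: ID does not exist")

// containerStatus is a stand-in for the CRI ContainerStatus call.
func containerStatus(id string, removed map[string]bool) (string, error) {
	if removed[id] {
		return "", errNotFound
	}
	return "exited", nil
}

// removeContainer mirrors the log's shape: a NotFound answer means the
// container is already gone, so deletion is treated as complete.
func removeContainer(id string, removed map[string]bool) {
	if _, err := containerStatus(id, removed); err != nil {
		// Corresponds to the "DeleteContainer returned error ...
		// not found: ID does not exist" entries above: logged, ignored.
		fmt.Printf("DeleteContainer returned error containerID=%q err=%v (already removed)\n", id, err)
		return
	}
	removed[id] = true
	fmt.Printf("removed container %q\n", id)
}

func main() {
	removed := map[string]bool{}
	id := "30045553997ea4d2" // truncated ID from the log, for illustration only
	removeContainer(id, removed) // first call removes the container
	removeContainer(id, removed) // retry hits the benign NotFound race
}

Running it prints one successful removal followed by the same NotFound wording seen in the surrounding entries, which is why those error lines do not indicate a teardown failure.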
Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.063593 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"6d480af3-7335-4cde-8f1a-5e8c89339d61","Type":"ContainerDied","Data":"68d535a3f683da43afeed5c547fb0ba352bc33e76efa8810042dfd483dadca40"} Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.063627 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"6d480af3-7335-4cde-8f1a-5e8c89339d61","Type":"ContainerDied","Data":"fc9aa87ea3589a04c2928fbc5b9a37bfe28ffbba7e027fab51e773b2d656fad4"} Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.524995 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.639708 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-var-locks-brick\") pod \"6d480af3-7335-4cde-8f1a-5e8c89339d61\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.639846 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d480af3-7335-4cde-8f1a-5e8c89339d61-scripts\") pod \"6d480af3-7335-4cde-8f1a-5e8c89339d61\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.639871 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "6d480af3-7335-4cde-8f1a-5e8c89339d61" (UID: "6d480af3-7335-4cde-8f1a-5e8c89339d61"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.639887 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-lib-modules\") pod \"6d480af3-7335-4cde-8f1a-5e8c89339d61\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.639919 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "6d480af3-7335-4cde-8f1a-5e8c89339d61" (UID: "6d480af3-7335-4cde-8f1a-5e8c89339d61"). InnerVolumeSpecName "lib-modules". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.639969 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"6d480af3-7335-4cde-8f1a-5e8c89339d61\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.640000 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-sys\") pod \"6d480af3-7335-4cde-8f1a-5e8c89339d61\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.640033 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-run\") pod \"6d480af3-7335-4cde-8f1a-5e8c89339d61\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.640065 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-etc-nvme\") pod \"6d480af3-7335-4cde-8f1a-5e8c89339d61\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.640103 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-dev\") pod \"6d480af3-7335-4cde-8f1a-5e8c89339d61\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.640116 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-etc-iscsi\") pod \"6d480af3-7335-4cde-8f1a-5e8c89339d61\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.640144 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6d480af3-7335-4cde-8f1a-5e8c89339d61-httpd-run\") pod \"6d480af3-7335-4cde-8f1a-5e8c89339d61\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.640185 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d480af3-7335-4cde-8f1a-5e8c89339d61-logs\") pod \"6d480af3-7335-4cde-8f1a-5e8c89339d61\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.640212 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-smmzz\" (UniqueName: \"kubernetes.io/projected/6d480af3-7335-4cde-8f1a-5e8c89339d61-kube-api-access-smmzz\") pod \"6d480af3-7335-4cde-8f1a-5e8c89339d61\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.640242 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d480af3-7335-4cde-8f1a-5e8c89339d61-config-data\") pod \"6d480af3-7335-4cde-8f1a-5e8c89339d61\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.640258 4685 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"6d480af3-7335-4cde-8f1a-5e8c89339d61\" (UID: \"6d480af3-7335-4cde-8f1a-5e8c89339d61\") " Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.640818 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.640830 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.640968 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "6d480af3-7335-4cde-8f1a-5e8c89339d61" (UID: "6d480af3-7335-4cde-8f1a-5e8c89339d61"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.641060 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-sys" (OuterVolumeSpecName: "sys") pod "6d480af3-7335-4cde-8f1a-5e8c89339d61" (UID: "6d480af3-7335-4cde-8f1a-5e8c89339d61"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.641091 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "6d480af3-7335-4cde-8f1a-5e8c89339d61" (UID: "6d480af3-7335-4cde-8f1a-5e8c89339d61"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.641230 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-dev" (OuterVolumeSpecName: "dev") pod "6d480af3-7335-4cde-8f1a-5e8c89339d61" (UID: "6d480af3-7335-4cde-8f1a-5e8c89339d61"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.641071 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-run" (OuterVolumeSpecName: "run") pod "6d480af3-7335-4cde-8f1a-5e8c89339d61" (UID: "6d480af3-7335-4cde-8f1a-5e8c89339d61"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.641848 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d480af3-7335-4cde-8f1a-5e8c89339d61-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "6d480af3-7335-4cde-8f1a-5e8c89339d61" (UID: "6d480af3-7335-4cde-8f1a-5e8c89339d61"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.641927 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d480af3-7335-4cde-8f1a-5e8c89339d61-logs" (OuterVolumeSpecName: "logs") pod "6d480af3-7335-4cde-8f1a-5e8c89339d61" (UID: "6d480af3-7335-4cde-8f1a-5e8c89339d61"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.645457 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance-cache") pod "6d480af3-7335-4cde-8f1a-5e8c89339d61" (UID: "6d480af3-7335-4cde-8f1a-5e8c89339d61"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.645748 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "6d480af3-7335-4cde-8f1a-5e8c89339d61" (UID: "6d480af3-7335-4cde-8f1a-5e8c89339d61"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.646848 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d480af3-7335-4cde-8f1a-5e8c89339d61-kube-api-access-smmzz" (OuterVolumeSpecName: "kube-api-access-smmzz") pod "6d480af3-7335-4cde-8f1a-5e8c89339d61" (UID: "6d480af3-7335-4cde-8f1a-5e8c89339d61"). InnerVolumeSpecName "kube-api-access-smmzz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.649188 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d480af3-7335-4cde-8f1a-5e8c89339d61-scripts" (OuterVolumeSpecName: "scripts") pod "6d480af3-7335-4cde-8f1a-5e8c89339d61" (UID: "6d480af3-7335-4cde-8f1a-5e8c89339d61"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.742453 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d480af3-7335-4cde-8f1a-5e8c89339d61-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.742531 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.742548 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.742561 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.742573 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.742585 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.742597 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/6d480af3-7335-4cde-8f1a-5e8c89339d61-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.742608 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6d480af3-7335-4cde-8f1a-5e8c89339d61-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.742620 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d480af3-7335-4cde-8f1a-5e8c89339d61-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.742633 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-smmzz\" (UniqueName: \"kubernetes.io/projected/6d480af3-7335-4cde-8f1a-5e8c89339d61-kube-api-access-smmzz\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.742653 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.755911 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d480af3-7335-4cde-8f1a-5e8c89339d61-config-data" (OuterVolumeSpecName: "config-data") pod "6d480af3-7335-4cde-8f1a-5e8c89339d61" (UID: "6d480af3-7335-4cde-8f1a-5e8c89339d61"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.771022 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.772434 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.844359 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d480af3-7335-4cde-8f1a-5e8c89339d61-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.844398 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.844412 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:10 crc kubenswrapper[4685]: I0128 12:50:10.887201 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.047321 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-etc-iscsi\") pod \"326f0b30-3e84-4b80-99c4-7fa87312efb2\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.047415 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-run\") pod \"326f0b30-3e84-4b80-99c4-7fa87312efb2\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.047472 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"326f0b30-3e84-4b80-99c4-7fa87312efb2\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.047551 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-dev\") pod \"326f0b30-3e84-4b80-99c4-7fa87312efb2\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.047571 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-etc-nvme\") pod \"326f0b30-3e84-4b80-99c4-7fa87312efb2\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.047615 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-txctj\" (UniqueName: \"kubernetes.io/projected/326f0b30-3e84-4b80-99c4-7fa87312efb2-kube-api-access-txctj\") pod \"326f0b30-3e84-4b80-99c4-7fa87312efb2\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 
12:50:11.047655 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"326f0b30-3e84-4b80-99c4-7fa87312efb2\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.047668 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-sys\") pod \"326f0b30-3e84-4b80-99c4-7fa87312efb2\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.047701 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/326f0b30-3e84-4b80-99c4-7fa87312efb2-config-data\") pod \"326f0b30-3e84-4b80-99c4-7fa87312efb2\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.047721 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-lib-modules\") pod \"326f0b30-3e84-4b80-99c4-7fa87312efb2\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.047748 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/326f0b30-3e84-4b80-99c4-7fa87312efb2-logs\") pod \"326f0b30-3e84-4b80-99c4-7fa87312efb2\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.047765 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-var-locks-brick\") pod \"326f0b30-3e84-4b80-99c4-7fa87312efb2\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.047785 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/326f0b30-3e84-4b80-99c4-7fa87312efb2-scripts\") pod \"326f0b30-3e84-4b80-99c4-7fa87312efb2\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.047806 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/326f0b30-3e84-4b80-99c4-7fa87312efb2-httpd-run\") pod \"326f0b30-3e84-4b80-99c4-7fa87312efb2\" (UID: \"326f0b30-3e84-4b80-99c4-7fa87312efb2\") " Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.048301 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/326f0b30-3e84-4b80-99c4-7fa87312efb2-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "326f0b30-3e84-4b80-99c4-7fa87312efb2" (UID: "326f0b30-3e84-4b80-99c4-7fa87312efb2"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.048340 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "326f0b30-3e84-4b80-99c4-7fa87312efb2" (UID: "326f0b30-3e84-4b80-99c4-7fa87312efb2"). InnerVolumeSpecName "etc-iscsi". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.048359 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-run" (OuterVolumeSpecName: "run") pod "326f0b30-3e84-4b80-99c4-7fa87312efb2" (UID: "326f0b30-3e84-4b80-99c4-7fa87312efb2"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.048613 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-sys" (OuterVolumeSpecName: "sys") pod "326f0b30-3e84-4b80-99c4-7fa87312efb2" (UID: "326f0b30-3e84-4b80-99c4-7fa87312efb2"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.048654 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-dev" (OuterVolumeSpecName: "dev") pod "326f0b30-3e84-4b80-99c4-7fa87312efb2" (UID: "326f0b30-3e84-4b80-99c4-7fa87312efb2"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.048677 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "326f0b30-3e84-4b80-99c4-7fa87312efb2" (UID: "326f0b30-3e84-4b80-99c4-7fa87312efb2"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.048867 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "326f0b30-3e84-4b80-99c4-7fa87312efb2" (UID: "326f0b30-3e84-4b80-99c4-7fa87312efb2"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.048923 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/326f0b30-3e84-4b80-99c4-7fa87312efb2-logs" (OuterVolumeSpecName: "logs") pod "326f0b30-3e84-4b80-99c4-7fa87312efb2" (UID: "326f0b30-3e84-4b80-99c4-7fa87312efb2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.048936 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "326f0b30-3e84-4b80-99c4-7fa87312efb2" (UID: "326f0b30-3e84-4b80-99c4-7fa87312efb2"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.053360 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage14-crc" (OuterVolumeSpecName: "glance") pod "326f0b30-3e84-4b80-99c4-7fa87312efb2" (UID: "326f0b30-3e84-4b80-99c4-7fa87312efb2"). InnerVolumeSpecName "local-storage14-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.053399 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/326f0b30-3e84-4b80-99c4-7fa87312efb2-kube-api-access-txctj" (OuterVolumeSpecName: "kube-api-access-txctj") pod "326f0b30-3e84-4b80-99c4-7fa87312efb2" (UID: "326f0b30-3e84-4b80-99c4-7fa87312efb2"). InnerVolumeSpecName "kube-api-access-txctj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.053405 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance-cache") pod "326f0b30-3e84-4b80-99c4-7fa87312efb2" (UID: "326f0b30-3e84-4b80-99c4-7fa87312efb2"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.053414 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/326f0b30-3e84-4b80-99c4-7fa87312efb2-scripts" (OuterVolumeSpecName: "scripts") pod "326f0b30-3e84-4b80-99c4-7fa87312efb2" (UID: "326f0b30-3e84-4b80-99c4-7fa87312efb2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.075076 4685 generic.go:334] "Generic (PLEG): container finished" podID="326f0b30-3e84-4b80-99c4-7fa87312efb2" containerID="6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953" exitCode=0 Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.075110 4685 generic.go:334] "Generic (PLEG): container finished" podID="326f0b30-3e84-4b80-99c4-7fa87312efb2" containerID="19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd" exitCode=0 Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.075120 4685 generic.go:334] "Generic (PLEG): container finished" podID="326f0b30-3e84-4b80-99c4-7fa87312efb2" containerID="78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271" exitCode=143 Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.075150 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.075120 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"326f0b30-3e84-4b80-99c4-7fa87312efb2","Type":"ContainerDied","Data":"6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953"} Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.075328 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"326f0b30-3e84-4b80-99c4-7fa87312efb2","Type":"ContainerDied","Data":"19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd"} Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.075350 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"326f0b30-3e84-4b80-99c4-7fa87312efb2","Type":"ContainerDied","Data":"78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271"} Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.075359 4685 scope.go:117] "RemoveContainer" containerID="6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.075364 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"326f0b30-3e84-4b80-99c4-7fa87312efb2","Type":"ContainerDied","Data":"c6637be01e48cdb303a46fd315213369b704410c24fecb9e5436add52619de42"} Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.079466 4685 generic.go:334] "Generic (PLEG): container finished" podID="6d480af3-7335-4cde-8f1a-5e8c89339d61" containerID="1e1053d37523136fbb90c95478efe81e8c162e7130f6707635232a9e6179b2fa" exitCode=0 Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.079577 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.079596 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"6d480af3-7335-4cde-8f1a-5e8c89339d61","Type":"ContainerDied","Data":"1e1053d37523136fbb90c95478efe81e8c162e7130f6707635232a9e6179b2fa"} Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.079728 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"6d480af3-7335-4cde-8f1a-5e8c89339d61","Type":"ContainerDied","Data":"fb28a047a1d87b1eff3b1242ca9b9799c6dcdc986cfe71b1fea99e0ffd7eb31d"} Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.098845 4685 scope.go:117] "RemoveContainer" containerID="19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.120674 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.126621 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.131359 4685 scope.go:117] "RemoveContainer" containerID="78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.132384 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/326f0b30-3e84-4b80-99c4-7fa87312efb2-config-data" (OuterVolumeSpecName: "config-data") pod "326f0b30-3e84-4b80-99c4-7fa87312efb2" (UID: "326f0b30-3e84-4b80-99c4-7fa87312efb2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.147690 4685 scope.go:117] "RemoveContainer" containerID="6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953" Jan 28 12:50:11 crc kubenswrapper[4685]: E0128 12:50:11.148314 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953\": container with ID starting with 6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953 not found: ID does not exist" containerID="6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.148366 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953"} err="failed to get container status \"6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953\": rpc error: code = NotFound desc = could not find container \"6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953\": container with ID starting with 6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953 not found: ID does not exist" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.148401 4685 scope.go:117] "RemoveContainer" containerID="19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd" Jan 28 12:50:11 crc kubenswrapper[4685]: E0128 12:50:11.148669 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd\": container with ID starting with 19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd not found: ID does not exist" containerID="19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.148697 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd"} err="failed to get container status \"19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd\": rpc error: code = NotFound desc = could not find container \"19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd\": container with ID starting with 19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd not found: ID does not exist" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.148714 4685 scope.go:117] "RemoveContainer" containerID="78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271" Jan 28 12:50:11 crc kubenswrapper[4685]: E0128 12:50:11.148939 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271\": container with ID starting with 78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271 not found: ID does not exist" containerID="78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.148968 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271"} err="failed to get container status \"78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271\": rpc error: code = NotFound desc = could not 
find container \"78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271\": container with ID starting with 78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271 not found: ID does not exist" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.148985 4685 scope.go:117] "RemoveContainer" containerID="6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.149230 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953"} err="failed to get container status \"6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953\": rpc error: code = NotFound desc = could not find container \"6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953\": container with ID starting with 6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953 not found: ID does not exist" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.149254 4685 scope.go:117] "RemoveContainer" containerID="19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.149906 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd"} err="failed to get container status \"19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd\": rpc error: code = NotFound desc = could not find container \"19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd\": container with ID starting with 19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd not found: ID does not exist" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.149931 4685 scope.go:117] "RemoveContainer" containerID="78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150003 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/326f0b30-3e84-4b80-99c4-7fa87312efb2-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150026 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/326f0b30-3e84-4b80-99c4-7fa87312efb2-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150038 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150049 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150076 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") on node \"crc\" " Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150086 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150096 4685 reconciler_common.go:293] "Volume 
detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150107 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-txctj\" (UniqueName: \"kubernetes.io/projected/326f0b30-3e84-4b80-99c4-7fa87312efb2-kube-api-access-txctj\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150122 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150131 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150140 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/326f0b30-3e84-4b80-99c4-7fa87312efb2-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150149 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150160 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/326f0b30-3e84-4b80-99c4-7fa87312efb2-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150160 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271"} err="failed to get container status \"78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271\": rpc error: code = NotFound desc = could not find container \"78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271\": container with ID starting with 78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271 not found: ID does not exist" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150200 4685 scope.go:117] "RemoveContainer" containerID="6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150187 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/326f0b30-3e84-4b80-99c4-7fa87312efb2-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150436 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953"} err="failed to get container status \"6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953\": rpc error: code = NotFound desc = could not find container \"6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953\": container with ID starting with 6f70533279b78507f8292c7a03e989f4f4745f7dd4270955ab0e5e85b245e953 not found: ID does not exist" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150455 4685 scope.go:117] "RemoveContainer" containerID="19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd" Jan 28 12:50:11 crc 
kubenswrapper[4685]: I0128 12:50:11.150626 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd"} err="failed to get container status \"19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd\": rpc error: code = NotFound desc = could not find container \"19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd\": container with ID starting with 19dc127d234c141c94ce330f1dd0e7f1c57f5b21b9f94c12465dcc521dcdcffd not found: ID does not exist" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150650 4685 scope.go:117] "RemoveContainer" containerID="78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150803 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271"} err="failed to get container status \"78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271\": rpc error: code = NotFound desc = could not find container \"78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271\": container with ID starting with 78f39acf87366dc1624475495f9f41c0817aa6fb73e87890fc32bd3a9215f271 not found: ID does not exist" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.150822 4685 scope.go:117] "RemoveContainer" containerID="1e1053d37523136fbb90c95478efe81e8c162e7130f6707635232a9e6179b2fa" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.164136 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.167523 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage14-crc" (UniqueName: "kubernetes.io/local-volume/local-storage14-crc") on node "crc" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.167570 4685 scope.go:117] "RemoveContainer" containerID="68d535a3f683da43afeed5c547fb0ba352bc33e76efa8810042dfd483dadca40" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.190157 4685 scope.go:117] "RemoveContainer" containerID="fc9aa87ea3589a04c2928fbc5b9a37bfe28ffbba7e027fab51e773b2d656fad4" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.207636 4685 scope.go:117] "RemoveContainer" containerID="1e1053d37523136fbb90c95478efe81e8c162e7130f6707635232a9e6179b2fa" Jan 28 12:50:11 crc kubenswrapper[4685]: E0128 12:50:11.208046 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e1053d37523136fbb90c95478efe81e8c162e7130f6707635232a9e6179b2fa\": container with ID starting with 1e1053d37523136fbb90c95478efe81e8c162e7130f6707635232a9e6179b2fa not found: ID does not exist" containerID="1e1053d37523136fbb90c95478efe81e8c162e7130f6707635232a9e6179b2fa" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.208087 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e1053d37523136fbb90c95478efe81e8c162e7130f6707635232a9e6179b2fa"} err="failed to get container status \"1e1053d37523136fbb90c95478efe81e8c162e7130f6707635232a9e6179b2fa\": rpc error: code = NotFound desc = could not find container \"1e1053d37523136fbb90c95478efe81e8c162e7130f6707635232a9e6179b2fa\": container with ID starting with 1e1053d37523136fbb90c95478efe81e8c162e7130f6707635232a9e6179b2fa not 
found: ID does not exist" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.208113 4685 scope.go:117] "RemoveContainer" containerID="68d535a3f683da43afeed5c547fb0ba352bc33e76efa8810042dfd483dadca40" Jan 28 12:50:11 crc kubenswrapper[4685]: E0128 12:50:11.208427 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68d535a3f683da43afeed5c547fb0ba352bc33e76efa8810042dfd483dadca40\": container with ID starting with 68d535a3f683da43afeed5c547fb0ba352bc33e76efa8810042dfd483dadca40 not found: ID does not exist" containerID="68d535a3f683da43afeed5c547fb0ba352bc33e76efa8810042dfd483dadca40" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.208465 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68d535a3f683da43afeed5c547fb0ba352bc33e76efa8810042dfd483dadca40"} err="failed to get container status \"68d535a3f683da43afeed5c547fb0ba352bc33e76efa8810042dfd483dadca40\": rpc error: code = NotFound desc = could not find container \"68d535a3f683da43afeed5c547fb0ba352bc33e76efa8810042dfd483dadca40\": container with ID starting with 68d535a3f683da43afeed5c547fb0ba352bc33e76efa8810042dfd483dadca40 not found: ID does not exist" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.208478 4685 scope.go:117] "RemoveContainer" containerID="fc9aa87ea3589a04c2928fbc5b9a37bfe28ffbba7e027fab51e773b2d656fad4" Jan 28 12:50:11 crc kubenswrapper[4685]: E0128 12:50:11.208684 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc9aa87ea3589a04c2928fbc5b9a37bfe28ffbba7e027fab51e773b2d656fad4\": container with ID starting with fc9aa87ea3589a04c2928fbc5b9a37bfe28ffbba7e027fab51e773b2d656fad4 not found: ID does not exist" containerID="fc9aa87ea3589a04c2928fbc5b9a37bfe28ffbba7e027fab51e773b2d656fad4" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.208723 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc9aa87ea3589a04c2928fbc5b9a37bfe28ffbba7e027fab51e773b2d656fad4"} err="failed to get container status \"fc9aa87ea3589a04c2928fbc5b9a37bfe28ffbba7e027fab51e773b2d656fad4\": rpc error: code = NotFound desc = could not find container \"fc9aa87ea3589a04c2928fbc5b9a37bfe28ffbba7e027fab51e773b2d656fad4\": container with ID starting with fc9aa87ea3589a04c2928fbc5b9a37bfe28ffbba7e027fab51e773b2d656fad4 not found: ID does not exist" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.251800 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.251842 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.411991 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:50:11 crc kubenswrapper[4685]: I0128 12:50:11.419265 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.404407 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-db-sync-vdwfv"] Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 
12:50:12.411574 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-db-sync-vdwfv"] Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.448959 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance74b0-account-delete-hbt4h"] Jan 28 12:50:12 crc kubenswrapper[4685]: E0128 12:50:12.449333 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edce2723-cc1f-4fc1-b42d-15a02e81d4b9" containerName="glance-log" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449352 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="edce2723-cc1f-4fc1-b42d-15a02e81d4b9" containerName="glance-log" Jan 28 12:50:12 crc kubenswrapper[4685]: E0128 12:50:12.449369 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d480af3-7335-4cde-8f1a-5e8c89339d61" containerName="glance-log" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449377 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d480af3-7335-4cde-8f1a-5e8c89339d61" containerName="glance-log" Jan 28 12:50:12 crc kubenswrapper[4685]: E0128 12:50:12.449388 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="326f0b30-3e84-4b80-99c4-7fa87312efb2" containerName="glance-httpd" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449396 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="326f0b30-3e84-4b80-99c4-7fa87312efb2" containerName="glance-httpd" Jan 28 12:50:12 crc kubenswrapper[4685]: E0128 12:50:12.449408 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d480af3-7335-4cde-8f1a-5e8c89339d61" containerName="glance-httpd" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449417 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d480af3-7335-4cde-8f1a-5e8c89339d61" containerName="glance-httpd" Jan 28 12:50:12 crc kubenswrapper[4685]: E0128 12:50:12.449428 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b051e6c-2e8b-4ee7-b6e5-544f67c66526" containerName="glance-api" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449435 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b051e6c-2e8b-4ee7-b6e5-544f67c66526" containerName="glance-api" Jan 28 12:50:12 crc kubenswrapper[4685]: E0128 12:50:12.449448 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edce2723-cc1f-4fc1-b42d-15a02e81d4b9" containerName="glance-api" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449455 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="edce2723-cc1f-4fc1-b42d-15a02e81d4b9" containerName="glance-api" Jan 28 12:50:12 crc kubenswrapper[4685]: E0128 12:50:12.449466 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d480af3-7335-4cde-8f1a-5e8c89339d61" containerName="glance-api" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449472 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d480af3-7335-4cde-8f1a-5e8c89339d61" containerName="glance-api" Jan 28 12:50:12 crc kubenswrapper[4685]: E0128 12:50:12.449483 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05e1a5a6-7b96-47cb-9221-df4f61a49880" containerName="glance-httpd" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449490 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="05e1a5a6-7b96-47cb-9221-df4f61a49880" containerName="glance-httpd" Jan 28 12:50:12 crc kubenswrapper[4685]: E0128 12:50:12.449505 4685 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="3b051e6c-2e8b-4ee7-b6e5-544f67c66526" containerName="glance-log" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449512 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b051e6c-2e8b-4ee7-b6e5-544f67c66526" containerName="glance-log" Jan 28 12:50:12 crc kubenswrapper[4685]: E0128 12:50:12.449522 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05e1a5a6-7b96-47cb-9221-df4f61a49880" containerName="glance-api" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449529 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="05e1a5a6-7b96-47cb-9221-df4f61a49880" containerName="glance-api" Jan 28 12:50:12 crc kubenswrapper[4685]: E0128 12:50:12.449544 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2ae8567-d50d-4940-8573-37c5cb1d1b8a" containerName="glance-log" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449551 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2ae8567-d50d-4940-8573-37c5cb1d1b8a" containerName="glance-log" Jan 28 12:50:12 crc kubenswrapper[4685]: E0128 12:50:12.449564 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="326f0b30-3e84-4b80-99c4-7fa87312efb2" containerName="glance-log" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449571 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="326f0b30-3e84-4b80-99c4-7fa87312efb2" containerName="glance-log" Jan 28 12:50:12 crc kubenswrapper[4685]: E0128 12:50:12.449582 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edce2723-cc1f-4fc1-b42d-15a02e81d4b9" containerName="glance-httpd" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449589 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="edce2723-cc1f-4fc1-b42d-15a02e81d4b9" containerName="glance-httpd" Jan 28 12:50:12 crc kubenswrapper[4685]: E0128 12:50:12.449604 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b051e6c-2e8b-4ee7-b6e5-544f67c66526" containerName="glance-httpd" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449611 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b051e6c-2e8b-4ee7-b6e5-544f67c66526" containerName="glance-httpd" Jan 28 12:50:12 crc kubenswrapper[4685]: E0128 12:50:12.449621 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="326f0b30-3e84-4b80-99c4-7fa87312efb2" containerName="glance-api" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449628 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="326f0b30-3e84-4b80-99c4-7fa87312efb2" containerName="glance-api" Jan 28 12:50:12 crc kubenswrapper[4685]: E0128 12:50:12.449637 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05e1a5a6-7b96-47cb-9221-df4f61a49880" containerName="glance-log" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449644 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="05e1a5a6-7b96-47cb-9221-df4f61a49880" containerName="glance-log" Jan 28 12:50:12 crc kubenswrapper[4685]: E0128 12:50:12.449651 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2ae8567-d50d-4940-8573-37c5cb1d1b8a" containerName="glance-httpd" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449658 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2ae8567-d50d-4940-8573-37c5cb1d1b8a" containerName="glance-httpd" Jan 28 12:50:12 crc kubenswrapper[4685]: E0128 12:50:12.449669 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2ae8567-d50d-4940-8573-37c5cb1d1b8a" 
containerName="glance-api" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449676 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2ae8567-d50d-4940-8573-37c5cb1d1b8a" containerName="glance-api" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449813 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d480af3-7335-4cde-8f1a-5e8c89339d61" containerName="glance-api" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449827 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2ae8567-d50d-4940-8573-37c5cb1d1b8a" containerName="glance-log" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449835 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="05e1a5a6-7b96-47cb-9221-df4f61a49880" containerName="glance-api" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449843 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d480af3-7335-4cde-8f1a-5e8c89339d61" containerName="glance-httpd" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449851 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b051e6c-2e8b-4ee7-b6e5-544f67c66526" containerName="glance-api" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449864 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="edce2723-cc1f-4fc1-b42d-15a02e81d4b9" containerName="glance-log" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449874 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="326f0b30-3e84-4b80-99c4-7fa87312efb2" containerName="glance-httpd" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449881 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="326f0b30-3e84-4b80-99c4-7fa87312efb2" containerName="glance-log" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449893 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2ae8567-d50d-4940-8573-37c5cb1d1b8a" containerName="glance-api" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449903 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="edce2723-cc1f-4fc1-b42d-15a02e81d4b9" containerName="glance-api" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449911 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d480af3-7335-4cde-8f1a-5e8c89339d61" containerName="glance-log" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449921 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="05e1a5a6-7b96-47cb-9221-df4f61a49880" containerName="glance-httpd" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449932 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2ae8567-d50d-4940-8573-37c5cb1d1b8a" containerName="glance-httpd" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449944 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="05e1a5a6-7b96-47cb-9221-df4f61a49880" containerName="glance-log" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449952 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="edce2723-cc1f-4fc1-b42d-15a02e81d4b9" containerName="glance-httpd" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449963 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b051e6c-2e8b-4ee7-b6e5-544f67c66526" containerName="glance-httpd" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449971 4685 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="326f0b30-3e84-4b80-99c4-7fa87312efb2" containerName="glance-api" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.449981 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b051e6c-2e8b-4ee7-b6e5-544f67c66526" containerName="glance-log" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.450543 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance74b0-account-delete-hbt4h" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.461429 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance74b0-account-delete-hbt4h"] Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.503614 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd-operator-scripts\") pod \"glance74b0-account-delete-hbt4h\" (UID: \"1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd\") " pod="glance-kuttl-tests/glance74b0-account-delete-hbt4h" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.503658 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gb9kd\" (UniqueName: \"kubernetes.io/projected/1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd-kube-api-access-gb9kd\") pod \"glance74b0-account-delete-hbt4h\" (UID: \"1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd\") " pod="glance-kuttl-tests/glance74b0-account-delete-hbt4h" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.558010 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="326f0b30-3e84-4b80-99c4-7fa87312efb2" path="/var/lib/kubelet/pods/326f0b30-3e84-4b80-99c4-7fa87312efb2/volumes" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.558653 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34f1ac52-5781-4029-9ba6-5bc9f7d7a193" path="/var/lib/kubelet/pods/34f1ac52-5781-4029-9ba6-5bc9f7d7a193/volumes" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.559415 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d480af3-7335-4cde-8f1a-5e8c89339d61" path="/var/lib/kubelet/pods/6d480af3-7335-4cde-8f1a-5e8c89339d61/volumes" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.604877 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd-operator-scripts\") pod \"glance74b0-account-delete-hbt4h\" (UID: \"1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd\") " pod="glance-kuttl-tests/glance74b0-account-delete-hbt4h" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.604946 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gb9kd\" (UniqueName: \"kubernetes.io/projected/1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd-kube-api-access-gb9kd\") pod \"glance74b0-account-delete-hbt4h\" (UID: \"1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd\") " pod="glance-kuttl-tests/glance74b0-account-delete-hbt4h" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.606147 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd-operator-scripts\") pod \"glance74b0-account-delete-hbt4h\" (UID: \"1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd\") " pod="glance-kuttl-tests/glance74b0-account-delete-hbt4h" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.625098 4685 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gb9kd\" (UniqueName: \"kubernetes.io/projected/1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd-kube-api-access-gb9kd\") pod \"glance74b0-account-delete-hbt4h\" (UID: \"1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd\") " pod="glance-kuttl-tests/glance74b0-account-delete-hbt4h" Jan 28 12:50:12 crc kubenswrapper[4685]: I0128 12:50:12.815744 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance74b0-account-delete-hbt4h" Jan 28 12:50:13 crc kubenswrapper[4685]: I0128 12:50:13.242486 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance74b0-account-delete-hbt4h"] Jan 28 12:50:13 crc kubenswrapper[4685]: I0128 12:50:13.862542 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2rtsx" Jan 28 12:50:13 crc kubenswrapper[4685]: I0128 12:50:13.862602 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2rtsx" Jan 28 12:50:13 crc kubenswrapper[4685]: I0128 12:50:13.926273 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2rtsx" Jan 28 12:50:14 crc kubenswrapper[4685]: I0128 12:50:14.104459 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance74b0-account-delete-hbt4h" event={"ID":"1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd","Type":"ContainerStarted","Data":"2f186a9e8ed3be761c5372746f37c74b08caeb6c743cd51e89d44a1697b34248"} Jan 28 12:50:14 crc kubenswrapper[4685]: I0128 12:50:14.149188 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2rtsx" Jan 28 12:50:14 crc kubenswrapper[4685]: I0128 12:50:14.209766 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2rtsx"] Jan 28 12:50:15 crc kubenswrapper[4685]: I0128 12:50:15.114702 4685 generic.go:334] "Generic (PLEG): container finished" podID="1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd" containerID="3c2bee5a5a7b328e99be338aaf06f86b8d50d5f12e3143178eca52eb0cb2f202" exitCode=0 Jan 28 12:50:15 crc kubenswrapper[4685]: I0128 12:50:15.114800 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance74b0-account-delete-hbt4h" event={"ID":"1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd","Type":"ContainerDied","Data":"3c2bee5a5a7b328e99be338aaf06f86b8d50d5f12e3143178eca52eb0cb2f202"} Jan 28 12:50:16 crc kubenswrapper[4685]: I0128 12:50:16.121453 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2rtsx" podUID="15ce0167-5fd5-4cf1-8505-8e2090fa5da0" containerName="registry-server" containerID="cri-o://97bca1bec0fb04ec9e47b41e078c2a52c0a4504d0077496406b285924f8fa6f8" gracePeriod=2 Jan 28 12:50:16 crc kubenswrapper[4685]: I0128 12:50:16.403037 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance74b0-account-delete-hbt4h" Jan 28 12:50:16 crc kubenswrapper[4685]: I0128 12:50:16.563826 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd-operator-scripts\") pod \"1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd\" (UID: \"1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd\") " Jan 28 12:50:16 crc kubenswrapper[4685]: I0128 12:50:16.564044 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gb9kd\" (UniqueName: \"kubernetes.io/projected/1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd-kube-api-access-gb9kd\") pod \"1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd\" (UID: \"1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd\") " Jan 28 12:50:16 crc kubenswrapper[4685]: I0128 12:50:16.565385 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd" (UID: "1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:50:16 crc kubenswrapper[4685]: I0128 12:50:16.571346 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd-kube-api-access-gb9kd" (OuterVolumeSpecName: "kube-api-access-gb9kd") pod "1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd" (UID: "1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd"). InnerVolumeSpecName "kube-api-access-gb9kd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:16 crc kubenswrapper[4685]: I0128 12:50:16.644862 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2rtsx" Jan 28 12:50:16 crc kubenswrapper[4685]: I0128 12:50:16.668230 4685 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:16 crc kubenswrapper[4685]: I0128 12:50:16.668269 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gb9kd\" (UniqueName: \"kubernetes.io/projected/1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd-kube-api-access-gb9kd\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:16 crc kubenswrapper[4685]: I0128 12:50:16.770067 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2k8bg\" (UniqueName: \"kubernetes.io/projected/15ce0167-5fd5-4cf1-8505-8e2090fa5da0-kube-api-access-2k8bg\") pod \"15ce0167-5fd5-4cf1-8505-8e2090fa5da0\" (UID: \"15ce0167-5fd5-4cf1-8505-8e2090fa5da0\") " Jan 28 12:50:16 crc kubenswrapper[4685]: I0128 12:50:16.770304 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15ce0167-5fd5-4cf1-8505-8e2090fa5da0-utilities\") pod \"15ce0167-5fd5-4cf1-8505-8e2090fa5da0\" (UID: \"15ce0167-5fd5-4cf1-8505-8e2090fa5da0\") " Jan 28 12:50:16 crc kubenswrapper[4685]: I0128 12:50:16.770412 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15ce0167-5fd5-4cf1-8505-8e2090fa5da0-catalog-content\") pod \"15ce0167-5fd5-4cf1-8505-8e2090fa5da0\" (UID: \"15ce0167-5fd5-4cf1-8505-8e2090fa5da0\") " Jan 28 12:50:16 crc kubenswrapper[4685]: I0128 12:50:16.771053 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15ce0167-5fd5-4cf1-8505-8e2090fa5da0-utilities" (OuterVolumeSpecName: "utilities") pod "15ce0167-5fd5-4cf1-8505-8e2090fa5da0" (UID: "15ce0167-5fd5-4cf1-8505-8e2090fa5da0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:16 crc kubenswrapper[4685]: I0128 12:50:16.771479 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15ce0167-5fd5-4cf1-8505-8e2090fa5da0-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:16 crc kubenswrapper[4685]: I0128 12:50:16.774110 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15ce0167-5fd5-4cf1-8505-8e2090fa5da0-kube-api-access-2k8bg" (OuterVolumeSpecName: "kube-api-access-2k8bg") pod "15ce0167-5fd5-4cf1-8505-8e2090fa5da0" (UID: "15ce0167-5fd5-4cf1-8505-8e2090fa5da0"). InnerVolumeSpecName "kube-api-access-2k8bg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:16 crc kubenswrapper[4685]: I0128 12:50:16.792033 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15ce0167-5fd5-4cf1-8505-8e2090fa5da0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "15ce0167-5fd5-4cf1-8505-8e2090fa5da0" (UID: "15ce0167-5fd5-4cf1-8505-8e2090fa5da0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:16 crc kubenswrapper[4685]: I0128 12:50:16.872421 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15ce0167-5fd5-4cf1-8505-8e2090fa5da0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:16 crc kubenswrapper[4685]: I0128 12:50:16.872475 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2k8bg\" (UniqueName: \"kubernetes.io/projected/15ce0167-5fd5-4cf1-8505-8e2090fa5da0-kube-api-access-2k8bg\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.129468 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance74b0-account-delete-hbt4h" event={"ID":"1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd","Type":"ContainerDied","Data":"2f186a9e8ed3be761c5372746f37c74b08caeb6c743cd51e89d44a1697b34248"} Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.129843 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2f186a9e8ed3be761c5372746f37c74b08caeb6c743cd51e89d44a1697b34248" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.129485 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance74b0-account-delete-hbt4h" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.132639 4685 generic.go:334] "Generic (PLEG): container finished" podID="15ce0167-5fd5-4cf1-8505-8e2090fa5da0" containerID="97bca1bec0fb04ec9e47b41e078c2a52c0a4504d0077496406b285924f8fa6f8" exitCode=0 Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.132673 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2rtsx" event={"ID":"15ce0167-5fd5-4cf1-8505-8e2090fa5da0","Type":"ContainerDied","Data":"97bca1bec0fb04ec9e47b41e078c2a52c0a4504d0077496406b285924f8fa6f8"} Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.132705 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2rtsx" event={"ID":"15ce0167-5fd5-4cf1-8505-8e2090fa5da0","Type":"ContainerDied","Data":"35fa4cce6e2c87ae3f34b28936e5bce3a08e7d8c6d907885ec5f9e61da313bff"} Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.132710 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2rtsx" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.132725 4685 scope.go:117] "RemoveContainer" containerID="97bca1bec0fb04ec9e47b41e078c2a52c0a4504d0077496406b285924f8fa6f8" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.158029 4685 scope.go:117] "RemoveContainer" containerID="7bf274f0e4b77b2137ee5eb836bd3b33cec8458c6771bf5a499160300d476eda" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.175838 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2rtsx"] Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.183091 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2rtsx"] Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.194396 4685 scope.go:117] "RemoveContainer" containerID="17b31a5dd324adf72d800ea7bf17ee28d1b5f61b83351577e6c8dd51b2775f51" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.213073 4685 scope.go:117] "RemoveContainer" containerID="97bca1bec0fb04ec9e47b41e078c2a52c0a4504d0077496406b285924f8fa6f8" Jan 28 12:50:17 crc kubenswrapper[4685]: E0128 12:50:17.213543 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97bca1bec0fb04ec9e47b41e078c2a52c0a4504d0077496406b285924f8fa6f8\": container with ID starting with 97bca1bec0fb04ec9e47b41e078c2a52c0a4504d0077496406b285924f8fa6f8 not found: ID does not exist" containerID="97bca1bec0fb04ec9e47b41e078c2a52c0a4504d0077496406b285924f8fa6f8" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.213578 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97bca1bec0fb04ec9e47b41e078c2a52c0a4504d0077496406b285924f8fa6f8"} err="failed to get container status \"97bca1bec0fb04ec9e47b41e078c2a52c0a4504d0077496406b285924f8fa6f8\": rpc error: code = NotFound desc = could not find container \"97bca1bec0fb04ec9e47b41e078c2a52c0a4504d0077496406b285924f8fa6f8\": container with ID starting with 97bca1bec0fb04ec9e47b41e078c2a52c0a4504d0077496406b285924f8fa6f8 not found: ID does not exist" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.213601 4685 scope.go:117] "RemoveContainer" containerID="7bf274f0e4b77b2137ee5eb836bd3b33cec8458c6771bf5a499160300d476eda" Jan 28 12:50:17 crc kubenswrapper[4685]: E0128 12:50:17.213892 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7bf274f0e4b77b2137ee5eb836bd3b33cec8458c6771bf5a499160300d476eda\": container with ID starting with 7bf274f0e4b77b2137ee5eb836bd3b33cec8458c6771bf5a499160300d476eda not found: ID does not exist" containerID="7bf274f0e4b77b2137ee5eb836bd3b33cec8458c6771bf5a499160300d476eda" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.213915 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7bf274f0e4b77b2137ee5eb836bd3b33cec8458c6771bf5a499160300d476eda"} err="failed to get container status \"7bf274f0e4b77b2137ee5eb836bd3b33cec8458c6771bf5a499160300d476eda\": rpc error: code = NotFound desc = could not find container \"7bf274f0e4b77b2137ee5eb836bd3b33cec8458c6771bf5a499160300d476eda\": container with ID starting with 7bf274f0e4b77b2137ee5eb836bd3b33cec8458c6771bf5a499160300d476eda not found: ID does not exist" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.213933 4685 scope.go:117] "RemoveContainer" 
containerID="17b31a5dd324adf72d800ea7bf17ee28d1b5f61b83351577e6c8dd51b2775f51" Jan 28 12:50:17 crc kubenswrapper[4685]: E0128 12:50:17.214383 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17b31a5dd324adf72d800ea7bf17ee28d1b5f61b83351577e6c8dd51b2775f51\": container with ID starting with 17b31a5dd324adf72d800ea7bf17ee28d1b5f61b83351577e6c8dd51b2775f51 not found: ID does not exist" containerID="17b31a5dd324adf72d800ea7bf17ee28d1b5f61b83351577e6c8dd51b2775f51" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.214405 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17b31a5dd324adf72d800ea7bf17ee28d1b5f61b83351577e6c8dd51b2775f51"} err="failed to get container status \"17b31a5dd324adf72d800ea7bf17ee28d1b5f61b83351577e6c8dd51b2775f51\": rpc error: code = NotFound desc = could not find container \"17b31a5dd324adf72d800ea7bf17ee28d1b5f61b83351577e6c8dd51b2775f51\": container with ID starting with 17b31a5dd324adf72d800ea7bf17ee28d1b5f61b83351577e6c8dd51b2775f51 not found: ID does not exist" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.522842 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-74b0-account-create-update-n2xpz"] Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.535196 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-db-create-6dg77"] Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.541310 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-74b0-account-create-update-n2xpz"] Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.550119 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance74b0-account-delete-hbt4h"] Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.562589 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-db-create-6dg77"] Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.569393 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance74b0-account-delete-hbt4h"] Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.969550 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-db-create-jnz4l"] Jan 28 12:50:17 crc kubenswrapper[4685]: E0128 12:50:17.969850 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15ce0167-5fd5-4cf1-8505-8e2090fa5da0" containerName="registry-server" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.969866 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="15ce0167-5fd5-4cf1-8505-8e2090fa5da0" containerName="registry-server" Jan 28 12:50:17 crc kubenswrapper[4685]: E0128 12:50:17.969878 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd" containerName="mariadb-account-delete" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.969886 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd" containerName="mariadb-account-delete" Jan 28 12:50:17 crc kubenswrapper[4685]: E0128 12:50:17.969918 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15ce0167-5fd5-4cf1-8505-8e2090fa5da0" containerName="extract-utilities" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.969930 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="15ce0167-5fd5-4cf1-8505-8e2090fa5da0" 
containerName="extract-utilities" Jan 28 12:50:17 crc kubenswrapper[4685]: E0128 12:50:17.969943 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15ce0167-5fd5-4cf1-8505-8e2090fa5da0" containerName="extract-content" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.969950 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="15ce0167-5fd5-4cf1-8505-8e2090fa5da0" containerName="extract-content" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.970080 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="15ce0167-5fd5-4cf1-8505-8e2090fa5da0" containerName="registry-server" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.970102 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd" containerName="mariadb-account-delete" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.970615 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-jnz4l" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.977249 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-create-jnz4l"] Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.983821 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-9468-account-create-update-lmsmx"] Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.984870 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-9468-account-create-update-lmsmx" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.986268 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqks6\" (UniqueName: \"kubernetes.io/projected/147e880f-0cd2-4f1d-9468-ed276cf35ead-kube-api-access-vqks6\") pod \"glance-db-create-jnz4l\" (UID: \"147e880f-0cd2-4f1d-9468-ed276cf35ead\") " pod="glance-kuttl-tests/glance-db-create-jnz4l" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.986303 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2c4efda-c923-4818-83c5-e44803358cd8-operator-scripts\") pod \"glance-9468-account-create-update-lmsmx\" (UID: \"e2c4efda-c923-4818-83c5-e44803358cd8\") " pod="glance-kuttl-tests/glance-9468-account-create-update-lmsmx" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.986374 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swxdr\" (UniqueName: \"kubernetes.io/projected/e2c4efda-c923-4818-83c5-e44803358cd8-kube-api-access-swxdr\") pod \"glance-9468-account-create-update-lmsmx\" (UID: \"e2c4efda-c923-4818-83c5-e44803358cd8\") " pod="glance-kuttl-tests/glance-9468-account-create-update-lmsmx" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.986428 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/147e880f-0cd2-4f1d-9468-ed276cf35ead-operator-scripts\") pod \"glance-db-create-jnz4l\" (UID: \"147e880f-0cd2-4f1d-9468-ed276cf35ead\") " pod="glance-kuttl-tests/glance-db-create-jnz4l" Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.989969 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-9468-account-create-update-lmsmx"] Jan 28 12:50:17 crc kubenswrapper[4685]: I0128 12:50:17.990058 
4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-db-secret" Jan 28 12:50:18 crc kubenswrapper[4685]: I0128 12:50:18.088538 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swxdr\" (UniqueName: \"kubernetes.io/projected/e2c4efda-c923-4818-83c5-e44803358cd8-kube-api-access-swxdr\") pod \"glance-9468-account-create-update-lmsmx\" (UID: \"e2c4efda-c923-4818-83c5-e44803358cd8\") " pod="glance-kuttl-tests/glance-9468-account-create-update-lmsmx" Jan 28 12:50:18 crc kubenswrapper[4685]: I0128 12:50:18.088926 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/147e880f-0cd2-4f1d-9468-ed276cf35ead-operator-scripts\") pod \"glance-db-create-jnz4l\" (UID: \"147e880f-0cd2-4f1d-9468-ed276cf35ead\") " pod="glance-kuttl-tests/glance-db-create-jnz4l" Jan 28 12:50:18 crc kubenswrapper[4685]: I0128 12:50:18.089067 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqks6\" (UniqueName: \"kubernetes.io/projected/147e880f-0cd2-4f1d-9468-ed276cf35ead-kube-api-access-vqks6\") pod \"glance-db-create-jnz4l\" (UID: \"147e880f-0cd2-4f1d-9468-ed276cf35ead\") " pod="glance-kuttl-tests/glance-db-create-jnz4l" Jan 28 12:50:18 crc kubenswrapper[4685]: I0128 12:50:18.089162 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2c4efda-c923-4818-83c5-e44803358cd8-operator-scripts\") pod \"glance-9468-account-create-update-lmsmx\" (UID: \"e2c4efda-c923-4818-83c5-e44803358cd8\") " pod="glance-kuttl-tests/glance-9468-account-create-update-lmsmx" Jan 28 12:50:18 crc kubenswrapper[4685]: I0128 12:50:18.089678 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/147e880f-0cd2-4f1d-9468-ed276cf35ead-operator-scripts\") pod \"glance-db-create-jnz4l\" (UID: \"147e880f-0cd2-4f1d-9468-ed276cf35ead\") " pod="glance-kuttl-tests/glance-db-create-jnz4l" Jan 28 12:50:18 crc kubenswrapper[4685]: I0128 12:50:18.089767 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2c4efda-c923-4818-83c5-e44803358cd8-operator-scripts\") pod \"glance-9468-account-create-update-lmsmx\" (UID: \"e2c4efda-c923-4818-83c5-e44803358cd8\") " pod="glance-kuttl-tests/glance-9468-account-create-update-lmsmx" Jan 28 12:50:18 crc kubenswrapper[4685]: I0128 12:50:18.105837 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swxdr\" (UniqueName: \"kubernetes.io/projected/e2c4efda-c923-4818-83c5-e44803358cd8-kube-api-access-swxdr\") pod \"glance-9468-account-create-update-lmsmx\" (UID: \"e2c4efda-c923-4818-83c5-e44803358cd8\") " pod="glance-kuttl-tests/glance-9468-account-create-update-lmsmx" Jan 28 12:50:18 crc kubenswrapper[4685]: I0128 12:50:18.106293 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqks6\" (UniqueName: \"kubernetes.io/projected/147e880f-0cd2-4f1d-9468-ed276cf35ead-kube-api-access-vqks6\") pod \"glance-db-create-jnz4l\" (UID: \"147e880f-0cd2-4f1d-9468-ed276cf35ead\") " pod="glance-kuttl-tests/glance-db-create-jnz4l" Jan 28 12:50:18 crc kubenswrapper[4685]: I0128 12:50:18.283775 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-db-create-jnz4l" Jan 28 12:50:18 crc kubenswrapper[4685]: I0128 12:50:18.298795 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-9468-account-create-update-lmsmx" Jan 28 12:50:18 crc kubenswrapper[4685]: I0128 12:50:18.556092 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15ce0167-5fd5-4cf1-8505-8e2090fa5da0" path="/var/lib/kubelet/pods/15ce0167-5fd5-4cf1-8505-8e2090fa5da0/volumes" Jan 28 12:50:18 crc kubenswrapper[4685]: I0128 12:50:18.557658 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd" path="/var/lib/kubelet/pods/1b4e88ed-5aa8-4b8d-93a6-50fbcecd01fd/volumes" Jan 28 12:50:18 crc kubenswrapper[4685]: I0128 12:50:18.558378 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e" path="/var/lib/kubelet/pods/2f93fbd2-b7fc-4aa8-a98a-eff0e2ef0e7e/volumes" Jan 28 12:50:18 crc kubenswrapper[4685]: I0128 12:50:18.559598 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2e82a53-2154-464c-b908-01c0a787d309" path="/var/lib/kubelet/pods/f2e82a53-2154-464c-b908-01c0a787d309/volumes" Jan 28 12:50:18 crc kubenswrapper[4685]: I0128 12:50:18.711623 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-create-jnz4l"] Jan 28 12:50:18 crc kubenswrapper[4685]: I0128 12:50:18.775863 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-9468-account-create-update-lmsmx"] Jan 28 12:50:19 crc kubenswrapper[4685]: I0128 12:50:19.151628 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-9468-account-create-update-lmsmx" event={"ID":"e2c4efda-c923-4818-83c5-e44803358cd8","Type":"ContainerStarted","Data":"6b28877cc2816532cbc2020dea0cd2ab4532b6ec08d813eca9589228f7ae4fce"} Jan 28 12:50:19 crc kubenswrapper[4685]: I0128 12:50:19.151673 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-9468-account-create-update-lmsmx" event={"ID":"e2c4efda-c923-4818-83c5-e44803358cd8","Type":"ContainerStarted","Data":"e8216dc7437889f63d7c9700f9312ea55599cce4c3a89865fa8ea27d323c7bcc"} Jan 28 12:50:19 crc kubenswrapper[4685]: I0128 12:50:19.154876 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-jnz4l" event={"ID":"147e880f-0cd2-4f1d-9468-ed276cf35ead","Type":"ContainerStarted","Data":"1ed467b0f59b8213320e6ec49be1c4fc34787d685c8075ca1cc281174d53cadb"} Jan 28 12:50:19 crc kubenswrapper[4685]: I0128 12:50:19.154973 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-jnz4l" event={"ID":"147e880f-0cd2-4f1d-9468-ed276cf35ead","Type":"ContainerStarted","Data":"25e22e046465e919271800ca441be75d9ef139addc2cf8046bd55b099c2dea3e"} Jan 28 12:50:19 crc kubenswrapper[4685]: I0128 12:50:19.171487 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-9468-account-create-update-lmsmx" podStartSLOduration=2.171464365 podStartE2EDuration="2.171464365s" podCreationTimestamp="2026-01-28 12:50:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:50:19.168954694 +0000 UTC m=+1770.256368549" watchObservedRunningTime="2026-01-28 12:50:19.171464365 +0000 UTC m=+1770.258878200" Jan 28 12:50:19 crc 
Jan 28 12:50:19 crc kubenswrapper[4685]: I0128 12:50:19.193929 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-db-create-jnz4l" podStartSLOduration=2.193904029 podStartE2EDuration="2.193904029s" podCreationTimestamp="2026-01-28 12:50:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:50:19.183005531 +0000 UTC m=+1770.270419366" watchObservedRunningTime="2026-01-28 12:50:19.193904029 +0000 UTC m=+1770.281317864"
Jan 28 12:50:19 crc kubenswrapper[4685]: I0128 12:50:19.546425 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65"
Jan 28 12:50:19 crc kubenswrapper[4685]: E0128 12:50:19.546636 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1"
Jan 28 12:50:20 crc kubenswrapper[4685]: I0128 12:50:20.164033 4685 generic.go:334] "Generic (PLEG): container finished" podID="147e880f-0cd2-4f1d-9468-ed276cf35ead" containerID="1ed467b0f59b8213320e6ec49be1c4fc34787d685c8075ca1cc281174d53cadb" exitCode=0
Jan 28 12:50:20 crc kubenswrapper[4685]: I0128 12:50:20.164232 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-jnz4l" event={"ID":"147e880f-0cd2-4f1d-9468-ed276cf35ead","Type":"ContainerDied","Data":"1ed467b0f59b8213320e6ec49be1c4fc34787d685c8075ca1cc281174d53cadb"}
Jan 28 12:50:20 crc kubenswrapper[4685]: I0128 12:50:20.165788 4685 generic.go:334] "Generic (PLEG): container finished" podID="e2c4efda-c923-4818-83c5-e44803358cd8" containerID="6b28877cc2816532cbc2020dea0cd2ab4532b6ec08d813eca9589228f7ae4fce" exitCode=0
Jan 28 12:50:20 crc kubenswrapper[4685]: I0128 12:50:20.165821 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-9468-account-create-update-lmsmx" event={"ID":"e2c4efda-c923-4818-83c5-e44803358cd8","Type":"ContainerDied","Data":"6b28877cc2816532cbc2020dea0cd2ab4532b6ec08d813eca9589228f7ae4fce"}
Jan 28 12:50:21 crc kubenswrapper[4685]: I0128 12:50:21.506159 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-jnz4l"
Jan 28 12:50:21 crc kubenswrapper[4685]: I0128 12:50:21.513945 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-9468-account-create-update-lmsmx" Jan 28 12:50:21 crc kubenswrapper[4685]: I0128 12:50:21.641335 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vqks6\" (UniqueName: \"kubernetes.io/projected/147e880f-0cd2-4f1d-9468-ed276cf35ead-kube-api-access-vqks6\") pod \"147e880f-0cd2-4f1d-9468-ed276cf35ead\" (UID: \"147e880f-0cd2-4f1d-9468-ed276cf35ead\") " Jan 28 12:50:21 crc kubenswrapper[4685]: I0128 12:50:21.641403 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-swxdr\" (UniqueName: \"kubernetes.io/projected/e2c4efda-c923-4818-83c5-e44803358cd8-kube-api-access-swxdr\") pod \"e2c4efda-c923-4818-83c5-e44803358cd8\" (UID: \"e2c4efda-c923-4818-83c5-e44803358cd8\") " Jan 28 12:50:21 crc kubenswrapper[4685]: I0128 12:50:21.641429 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2c4efda-c923-4818-83c5-e44803358cd8-operator-scripts\") pod \"e2c4efda-c923-4818-83c5-e44803358cd8\" (UID: \"e2c4efda-c923-4818-83c5-e44803358cd8\") " Jan 28 12:50:21 crc kubenswrapper[4685]: I0128 12:50:21.641515 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/147e880f-0cd2-4f1d-9468-ed276cf35ead-operator-scripts\") pod \"147e880f-0cd2-4f1d-9468-ed276cf35ead\" (UID: \"147e880f-0cd2-4f1d-9468-ed276cf35ead\") " Jan 28 12:50:21 crc kubenswrapper[4685]: I0128 12:50:21.642211 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/147e880f-0cd2-4f1d-9468-ed276cf35ead-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "147e880f-0cd2-4f1d-9468-ed276cf35ead" (UID: "147e880f-0cd2-4f1d-9468-ed276cf35ead"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:50:21 crc kubenswrapper[4685]: I0128 12:50:21.642593 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2c4efda-c923-4818-83c5-e44803358cd8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e2c4efda-c923-4818-83c5-e44803358cd8" (UID: "e2c4efda-c923-4818-83c5-e44803358cd8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:50:21 crc kubenswrapper[4685]: I0128 12:50:21.646543 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/147e880f-0cd2-4f1d-9468-ed276cf35ead-kube-api-access-vqks6" (OuterVolumeSpecName: "kube-api-access-vqks6") pod "147e880f-0cd2-4f1d-9468-ed276cf35ead" (UID: "147e880f-0cd2-4f1d-9468-ed276cf35ead"). InnerVolumeSpecName "kube-api-access-vqks6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:21 crc kubenswrapper[4685]: I0128 12:50:21.649650 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2c4efda-c923-4818-83c5-e44803358cd8-kube-api-access-swxdr" (OuterVolumeSpecName: "kube-api-access-swxdr") pod "e2c4efda-c923-4818-83c5-e44803358cd8" (UID: "e2c4efda-c923-4818-83c5-e44803358cd8"). InnerVolumeSpecName "kube-api-access-swxdr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:21 crc kubenswrapper[4685]: I0128 12:50:21.744782 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vqks6\" (UniqueName: \"kubernetes.io/projected/147e880f-0cd2-4f1d-9468-ed276cf35ead-kube-api-access-vqks6\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:21 crc kubenswrapper[4685]: I0128 12:50:21.744822 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-swxdr\" (UniqueName: \"kubernetes.io/projected/e2c4efda-c923-4818-83c5-e44803358cd8-kube-api-access-swxdr\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:21 crc kubenswrapper[4685]: I0128 12:50:21.744842 4685 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2c4efda-c923-4818-83c5-e44803358cd8-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:21 crc kubenswrapper[4685]: I0128 12:50:21.744854 4685 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/147e880f-0cd2-4f1d-9468-ed276cf35ead-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:22 crc kubenswrapper[4685]: I0128 12:50:22.182193 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-9468-account-create-update-lmsmx" Jan 28 12:50:22 crc kubenswrapper[4685]: I0128 12:50:22.182192 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-9468-account-create-update-lmsmx" event={"ID":"e2c4efda-c923-4818-83c5-e44803358cd8","Type":"ContainerDied","Data":"e8216dc7437889f63d7c9700f9312ea55599cce4c3a89865fa8ea27d323c7bcc"} Jan 28 12:50:22 crc kubenswrapper[4685]: I0128 12:50:22.182355 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e8216dc7437889f63d7c9700f9312ea55599cce4c3a89865fa8ea27d323c7bcc" Jan 28 12:50:22 crc kubenswrapper[4685]: I0128 12:50:22.183928 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-db-create-jnz4l" Jan 28 12:50:22 crc kubenswrapper[4685]: I0128 12:50:22.183918 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-jnz4l" event={"ID":"147e880f-0cd2-4f1d-9468-ed276cf35ead","Type":"ContainerDied","Data":"25e22e046465e919271800ca441be75d9ef139addc2cf8046bd55b099c2dea3e"} Jan 28 12:50:22 crc kubenswrapper[4685]: I0128 12:50:22.184053 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="25e22e046465e919271800ca441be75d9ef139addc2cf8046bd55b099c2dea3e" Jan 28 12:50:23 crc kubenswrapper[4685]: I0128 12:50:23.208711 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-db-sync-kxlwl"] Jan 28 12:50:23 crc kubenswrapper[4685]: E0128 12:50:23.209031 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="147e880f-0cd2-4f1d-9468-ed276cf35ead" containerName="mariadb-database-create" Jan 28 12:50:23 crc kubenswrapper[4685]: I0128 12:50:23.209049 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="147e880f-0cd2-4f1d-9468-ed276cf35ead" containerName="mariadb-database-create" Jan 28 12:50:23 crc kubenswrapper[4685]: E0128 12:50:23.209075 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2c4efda-c923-4818-83c5-e44803358cd8" containerName="mariadb-account-create-update" Jan 28 12:50:23 crc kubenswrapper[4685]: I0128 12:50:23.209084 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2c4efda-c923-4818-83c5-e44803358cd8" containerName="mariadb-account-create-update" Jan 28 12:50:23 crc kubenswrapper[4685]: I0128 12:50:23.209269 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2c4efda-c923-4818-83c5-e44803358cd8" containerName="mariadb-account-create-update" Jan 28 12:50:23 crc kubenswrapper[4685]: I0128 12:50:23.209286 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="147e880f-0cd2-4f1d-9468-ed276cf35ead" containerName="mariadb-database-create" Jan 28 12:50:23 crc kubenswrapper[4685]: I0128 12:50:23.209827 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-kxlwl" Jan 28 12:50:23 crc kubenswrapper[4685]: I0128 12:50:23.212409 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-glance-dockercfg-f6w27" Jan 28 12:50:23 crc kubenswrapper[4685]: I0128 12:50:23.213796 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-config-data" Jan 28 12:50:23 crc kubenswrapper[4685]: I0128 12:50:23.219159 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-sync-kxlwl"] Jan 28 12:50:23 crc kubenswrapper[4685]: I0128 12:50:23.288828 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2c27abf-bd6d-467c-aa79-b0c267c57c3b-config-data\") pod \"glance-db-sync-kxlwl\" (UID: \"f2c27abf-bd6d-467c-aa79-b0c267c57c3b\") " pod="glance-kuttl-tests/glance-db-sync-kxlwl" Jan 28 12:50:23 crc kubenswrapper[4685]: I0128 12:50:23.288890 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7f6s\" (UniqueName: \"kubernetes.io/projected/f2c27abf-bd6d-467c-aa79-b0c267c57c3b-kube-api-access-l7f6s\") pod \"glance-db-sync-kxlwl\" (UID: \"f2c27abf-bd6d-467c-aa79-b0c267c57c3b\") " pod="glance-kuttl-tests/glance-db-sync-kxlwl" Jan 28 12:50:23 crc kubenswrapper[4685]: I0128 12:50:23.288918 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f2c27abf-bd6d-467c-aa79-b0c267c57c3b-db-sync-config-data\") pod \"glance-db-sync-kxlwl\" (UID: \"f2c27abf-bd6d-467c-aa79-b0c267c57c3b\") " pod="glance-kuttl-tests/glance-db-sync-kxlwl" Jan 28 12:50:23 crc kubenswrapper[4685]: I0128 12:50:23.390025 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2c27abf-bd6d-467c-aa79-b0c267c57c3b-config-data\") pod \"glance-db-sync-kxlwl\" (UID: \"f2c27abf-bd6d-467c-aa79-b0c267c57c3b\") " pod="glance-kuttl-tests/glance-db-sync-kxlwl" Jan 28 12:50:23 crc kubenswrapper[4685]: I0128 12:50:23.390091 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7f6s\" (UniqueName: \"kubernetes.io/projected/f2c27abf-bd6d-467c-aa79-b0c267c57c3b-kube-api-access-l7f6s\") pod \"glance-db-sync-kxlwl\" (UID: \"f2c27abf-bd6d-467c-aa79-b0c267c57c3b\") " pod="glance-kuttl-tests/glance-db-sync-kxlwl" Jan 28 12:50:23 crc kubenswrapper[4685]: I0128 12:50:23.390123 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f2c27abf-bd6d-467c-aa79-b0c267c57c3b-db-sync-config-data\") pod \"glance-db-sync-kxlwl\" (UID: \"f2c27abf-bd6d-467c-aa79-b0c267c57c3b\") " pod="glance-kuttl-tests/glance-db-sync-kxlwl" Jan 28 12:50:23 crc kubenswrapper[4685]: I0128 12:50:23.393898 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f2c27abf-bd6d-467c-aa79-b0c267c57c3b-db-sync-config-data\") pod \"glance-db-sync-kxlwl\" (UID: \"f2c27abf-bd6d-467c-aa79-b0c267c57c3b\") " pod="glance-kuttl-tests/glance-db-sync-kxlwl" Jan 28 12:50:23 crc kubenswrapper[4685]: I0128 12:50:23.394130 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/f2c27abf-bd6d-467c-aa79-b0c267c57c3b-config-data\") pod \"glance-db-sync-kxlwl\" (UID: \"f2c27abf-bd6d-467c-aa79-b0c267c57c3b\") " pod="glance-kuttl-tests/glance-db-sync-kxlwl" Jan 28 12:50:23 crc kubenswrapper[4685]: I0128 12:50:23.421115 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7f6s\" (UniqueName: \"kubernetes.io/projected/f2c27abf-bd6d-467c-aa79-b0c267c57c3b-kube-api-access-l7f6s\") pod \"glance-db-sync-kxlwl\" (UID: \"f2c27abf-bd6d-467c-aa79-b0c267c57c3b\") " pod="glance-kuttl-tests/glance-db-sync-kxlwl" Jan 28 12:50:23 crc kubenswrapper[4685]: I0128 12:50:23.525683 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-kxlwl" Jan 28 12:50:23 crc kubenswrapper[4685]: I0128 12:50:23.960846 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-sync-kxlwl"] Jan 28 12:50:24 crc kubenswrapper[4685]: I0128 12:50:24.199033 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-kxlwl" event={"ID":"f2c27abf-bd6d-467c-aa79-b0c267c57c3b","Type":"ContainerStarted","Data":"9578cc9ffdca5f9afd0923d59634cb6d34d897f3d9e58918a2ac52c2ed137d23"} Jan 28 12:50:25 crc kubenswrapper[4685]: I0128 12:50:25.207973 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-kxlwl" event={"ID":"f2c27abf-bd6d-467c-aa79-b0c267c57c3b","Type":"ContainerStarted","Data":"9d8e4d57e3173ecb8767869a6018812a51c2c816324f2d83ec5098504d828e30"} Jan 28 12:50:25 crc kubenswrapper[4685]: I0128 12:50:25.228224 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-db-sync-kxlwl" podStartSLOduration=2.228199438 podStartE2EDuration="2.228199438s" podCreationTimestamp="2026-01-28 12:50:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:50:25.222050034 +0000 UTC m=+1776.309463869" watchObservedRunningTime="2026-01-28 12:50:25.228199438 +0000 UTC m=+1776.315613293" Jan 28 12:50:26 crc kubenswrapper[4685]: I0128 12:50:26.272526 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qzkrj"] Jan 28 12:50:26 crc kubenswrapper[4685]: I0128 12:50:26.274710 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qzkrj" Jan 28 12:50:26 crc kubenswrapper[4685]: I0128 12:50:26.285330 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qzkrj"] Jan 28 12:50:26 crc kubenswrapper[4685]: I0128 12:50:26.436273 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a39d59dd-42b2-4da5-b205-12db78b4c405-utilities\") pod \"certified-operators-qzkrj\" (UID: \"a39d59dd-42b2-4da5-b205-12db78b4c405\") " pod="openshift-marketplace/certified-operators-qzkrj" Jan 28 12:50:26 crc kubenswrapper[4685]: I0128 12:50:26.436676 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a39d59dd-42b2-4da5-b205-12db78b4c405-catalog-content\") pod \"certified-operators-qzkrj\" (UID: \"a39d59dd-42b2-4da5-b205-12db78b4c405\") " pod="openshift-marketplace/certified-operators-qzkrj" Jan 28 12:50:26 crc kubenswrapper[4685]: I0128 12:50:26.436827 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42mkm\" (UniqueName: \"kubernetes.io/projected/a39d59dd-42b2-4da5-b205-12db78b4c405-kube-api-access-42mkm\") pod \"certified-operators-qzkrj\" (UID: \"a39d59dd-42b2-4da5-b205-12db78b4c405\") " pod="openshift-marketplace/certified-operators-qzkrj" Jan 28 12:50:26 crc kubenswrapper[4685]: I0128 12:50:26.537888 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a39d59dd-42b2-4da5-b205-12db78b4c405-catalog-content\") pod \"certified-operators-qzkrj\" (UID: \"a39d59dd-42b2-4da5-b205-12db78b4c405\") " pod="openshift-marketplace/certified-operators-qzkrj" Jan 28 12:50:26 crc kubenswrapper[4685]: I0128 12:50:26.538111 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42mkm\" (UniqueName: \"kubernetes.io/projected/a39d59dd-42b2-4da5-b205-12db78b4c405-kube-api-access-42mkm\") pod \"certified-operators-qzkrj\" (UID: \"a39d59dd-42b2-4da5-b205-12db78b4c405\") " pod="openshift-marketplace/certified-operators-qzkrj" Jan 28 12:50:26 crc kubenswrapper[4685]: I0128 12:50:26.538236 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a39d59dd-42b2-4da5-b205-12db78b4c405-utilities\") pod \"certified-operators-qzkrj\" (UID: \"a39d59dd-42b2-4da5-b205-12db78b4c405\") " pod="openshift-marketplace/certified-operators-qzkrj" Jan 28 12:50:26 crc kubenswrapper[4685]: I0128 12:50:26.538409 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a39d59dd-42b2-4da5-b205-12db78b4c405-catalog-content\") pod \"certified-operators-qzkrj\" (UID: \"a39d59dd-42b2-4da5-b205-12db78b4c405\") " pod="openshift-marketplace/certified-operators-qzkrj" Jan 28 12:50:26 crc kubenswrapper[4685]: I0128 12:50:26.538730 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a39d59dd-42b2-4da5-b205-12db78b4c405-utilities\") pod \"certified-operators-qzkrj\" (UID: \"a39d59dd-42b2-4da5-b205-12db78b4c405\") " pod="openshift-marketplace/certified-operators-qzkrj" Jan 28 12:50:26 crc kubenswrapper[4685]: I0128 12:50:26.558988 4685 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-42mkm\" (UniqueName: \"kubernetes.io/projected/a39d59dd-42b2-4da5-b205-12db78b4c405-kube-api-access-42mkm\") pod \"certified-operators-qzkrj\" (UID: \"a39d59dd-42b2-4da5-b205-12db78b4c405\") " pod="openshift-marketplace/certified-operators-qzkrj" Jan 28 12:50:26 crc kubenswrapper[4685]: I0128 12:50:26.599842 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qzkrj" Jan 28 12:50:27 crc kubenswrapper[4685]: I0128 12:50:27.195285 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qzkrj"] Jan 28 12:50:27 crc kubenswrapper[4685]: W0128 12:50:27.206432 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda39d59dd_42b2_4da5_b205_12db78b4c405.slice/crio-79b3434146d9078482b0591a5de70567f3330d9f17af798acb302b1f4f5e107c WatchSource:0}: Error finding container 79b3434146d9078482b0591a5de70567f3330d9f17af798acb302b1f4f5e107c: Status 404 returned error can't find the container with id 79b3434146d9078482b0591a5de70567f3330d9f17af798acb302b1f4f5e107c Jan 28 12:50:27 crc kubenswrapper[4685]: I0128 12:50:27.222938 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qzkrj" event={"ID":"a39d59dd-42b2-4da5-b205-12db78b4c405","Type":"ContainerStarted","Data":"79b3434146d9078482b0591a5de70567f3330d9f17af798acb302b1f4f5e107c"} Jan 28 12:50:28 crc kubenswrapper[4685]: I0128 12:50:28.232027 4685 generic.go:334] "Generic (PLEG): container finished" podID="a39d59dd-42b2-4da5-b205-12db78b4c405" containerID="e43ff71a9fbaaae5b48d36d63002804e2bca8c4ea429027f539a07e7d6907e9f" exitCode=0 Jan 28 12:50:28 crc kubenswrapper[4685]: I0128 12:50:28.232138 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qzkrj" event={"ID":"a39d59dd-42b2-4da5-b205-12db78b4c405","Type":"ContainerDied","Data":"e43ff71a9fbaaae5b48d36d63002804e2bca8c4ea429027f539a07e7d6907e9f"} Jan 28 12:50:28 crc kubenswrapper[4685]: I0128 12:50:28.234071 4685 generic.go:334] "Generic (PLEG): container finished" podID="f2c27abf-bd6d-467c-aa79-b0c267c57c3b" containerID="9d8e4d57e3173ecb8767869a6018812a51c2c816324f2d83ec5098504d828e30" exitCode=0 Jan 28 12:50:28 crc kubenswrapper[4685]: I0128 12:50:28.234134 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-kxlwl" event={"ID":"f2c27abf-bd6d-467c-aa79-b0c267c57c3b","Type":"ContainerDied","Data":"9d8e4d57e3173ecb8767869a6018812a51c2c816324f2d83ec5098504d828e30"} Jan 28 12:50:29 crc kubenswrapper[4685]: I0128 12:50:29.243585 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qzkrj" event={"ID":"a39d59dd-42b2-4da5-b205-12db78b4c405","Type":"ContainerStarted","Data":"b443a6a9b08d7aef1a215c2ff34b33a81bebb0057ee3a6b12fc6d52590cc8934"} Jan 28 12:50:29 crc kubenswrapper[4685]: I0128 12:50:29.535692 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-kxlwl" Jan 28 12:50:29 crc kubenswrapper[4685]: I0128 12:50:29.684902 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l7f6s\" (UniqueName: \"kubernetes.io/projected/f2c27abf-bd6d-467c-aa79-b0c267c57c3b-kube-api-access-l7f6s\") pod \"f2c27abf-bd6d-467c-aa79-b0c267c57c3b\" (UID: \"f2c27abf-bd6d-467c-aa79-b0c267c57c3b\") " Jan 28 12:50:29 crc kubenswrapper[4685]: I0128 12:50:29.684989 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f2c27abf-bd6d-467c-aa79-b0c267c57c3b-db-sync-config-data\") pod \"f2c27abf-bd6d-467c-aa79-b0c267c57c3b\" (UID: \"f2c27abf-bd6d-467c-aa79-b0c267c57c3b\") " Jan 28 12:50:29 crc kubenswrapper[4685]: I0128 12:50:29.685071 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2c27abf-bd6d-467c-aa79-b0c267c57c3b-config-data\") pod \"f2c27abf-bd6d-467c-aa79-b0c267c57c3b\" (UID: \"f2c27abf-bd6d-467c-aa79-b0c267c57c3b\") " Jan 28 12:50:29 crc kubenswrapper[4685]: I0128 12:50:29.690651 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2c27abf-bd6d-467c-aa79-b0c267c57c3b-kube-api-access-l7f6s" (OuterVolumeSpecName: "kube-api-access-l7f6s") pod "f2c27abf-bd6d-467c-aa79-b0c267c57c3b" (UID: "f2c27abf-bd6d-467c-aa79-b0c267c57c3b"). InnerVolumeSpecName "kube-api-access-l7f6s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:29 crc kubenswrapper[4685]: I0128 12:50:29.691336 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2c27abf-bd6d-467c-aa79-b0c267c57c3b-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "f2c27abf-bd6d-467c-aa79-b0c267c57c3b" (UID: "f2c27abf-bd6d-467c-aa79-b0c267c57c3b"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:29 crc kubenswrapper[4685]: I0128 12:50:29.721892 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2c27abf-bd6d-467c-aa79-b0c267c57c3b-config-data" (OuterVolumeSpecName: "config-data") pod "f2c27abf-bd6d-467c-aa79-b0c267c57c3b" (UID: "f2c27abf-bd6d-467c-aa79-b0c267c57c3b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:29 crc kubenswrapper[4685]: I0128 12:50:29.788532 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l7f6s\" (UniqueName: \"kubernetes.io/projected/f2c27abf-bd6d-467c-aa79-b0c267c57c3b-kube-api-access-l7f6s\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:29 crc kubenswrapper[4685]: I0128 12:50:29.788571 4685 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f2c27abf-bd6d-467c-aa79-b0c267c57c3b-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:29 crc kubenswrapper[4685]: I0128 12:50:29.788585 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2c27abf-bd6d-467c-aa79-b0c267c57c3b-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:30 crc kubenswrapper[4685]: I0128 12:50:30.253468 4685 generic.go:334] "Generic (PLEG): container finished" podID="a39d59dd-42b2-4da5-b205-12db78b4c405" containerID="b443a6a9b08d7aef1a215c2ff34b33a81bebb0057ee3a6b12fc6d52590cc8934" exitCode=0 Jan 28 12:50:30 crc kubenswrapper[4685]: I0128 12:50:30.253540 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qzkrj" event={"ID":"a39d59dd-42b2-4da5-b205-12db78b4c405","Type":"ContainerDied","Data":"b443a6a9b08d7aef1a215c2ff34b33a81bebb0057ee3a6b12fc6d52590cc8934"} Jan 28 12:50:30 crc kubenswrapper[4685]: I0128 12:50:30.255041 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-kxlwl" event={"ID":"f2c27abf-bd6d-467c-aa79-b0c267c57c3b","Type":"ContainerDied","Data":"9578cc9ffdca5f9afd0923d59634cb6d34d897f3d9e58918a2ac52c2ed137d23"} Jan 28 12:50:30 crc kubenswrapper[4685]: I0128 12:50:30.255415 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9578cc9ffdca5f9afd0923d59634cb6d34d897f3d9e58918a2ac52c2ed137d23" Jan 28 12:50:30 crc kubenswrapper[4685]: I0128 12:50:30.255065 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-kxlwl" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.265303 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qzkrj" event={"ID":"a39d59dd-42b2-4da5-b205-12db78b4c405","Type":"ContainerStarted","Data":"b32063023e2973bc74e9d74e51923898c5b801347892752177c0143412169456"} Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.299198 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qzkrj" podStartSLOduration=2.653727357 podStartE2EDuration="5.299160713s" podCreationTimestamp="2026-01-28 12:50:26 +0000 UTC" firstStartedPulling="2026-01-28 12:50:28.233668062 +0000 UTC m=+1779.321081917" lastFinishedPulling="2026-01-28 12:50:30.879101438 +0000 UTC m=+1781.966515273" observedRunningTime="2026-01-28 12:50:31.292498495 +0000 UTC m=+1782.379912330" watchObservedRunningTime="2026-01-28 12:50:31.299160713 +0000 UTC m=+1782.386574548" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.415195 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:50:31 crc kubenswrapper[4685]: E0128 12:50:31.415463 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2c27abf-bd6d-467c-aa79-b0c267c57c3b" containerName="glance-db-sync" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.415477 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2c27abf-bd6d-467c-aa79-b0c267c57c3b" containerName="glance-db-sync" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.415610 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2c27abf-bd6d-467c-aa79-b0c267c57c3b" containerName="glance-db-sync" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.416357 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.417803 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-glance-dockercfg-f6w27" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.418137 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-scripts" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.418459 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-default-external-config-data" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.430794 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.511574 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-etc-nvme\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.511616 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c0d265d-5416-4aab-b089-1b9a8356fb92-scripts\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.511648 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-etc-iscsi\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.511666 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.511795 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-dev\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.511822 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-var-locks-brick\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.511858 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c0d265d-5416-4aab-b089-1b9a8356fb92-logs\") pod \"glance-default-external-api-0\" (UID: 
\"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.511878 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4c0d265d-5416-4aab-b089-1b9a8356fb92-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.511895 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-run\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.511999 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-lib-modules\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.512104 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-sys\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.512153 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfbtq\" (UniqueName: \"kubernetes.io/projected/4c0d265d-5416-4aab-b089-1b9a8356fb92-kube-api-access-xfbtq\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.512247 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c0d265d-5416-4aab-b089-1b9a8356fb92-config-data\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.512267 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage18-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage18-crc\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.613057 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-sys\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.613106 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfbtq\" (UniqueName: 
\"kubernetes.io/projected/4c0d265d-5416-4aab-b089-1b9a8356fb92-kube-api-access-xfbtq\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.613131 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c0d265d-5416-4aab-b089-1b9a8356fb92-config-data\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.613157 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage18-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage18-crc\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.613207 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-etc-nvme\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.613223 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c0d265d-5416-4aab-b089-1b9a8356fb92-scripts\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.613249 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-etc-iscsi\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.613265 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.613287 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-dev\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.613305 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-var-locks-brick\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.613323 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c0d265d-5416-4aab-b089-1b9a8356fb92-logs\") 
pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.613341 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4c0d265d-5416-4aab-b089-1b9a8356fb92-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.613356 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-run\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.613388 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-lib-modules\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.613473 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-lib-modules\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.613508 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-sys\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.614621 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-etc-nvme\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.614884 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage18-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage18-crc\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") device mount path \"/mnt/openstack/pv18\"" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.614930 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-etc-iscsi\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.614956 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4c0d265d-5416-4aab-b089-1b9a8356fb92-httpd-run\") pod \"glance-default-external-api-0\" (UID: 
\"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.614995 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-dev\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.615003 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-run\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.615012 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c0d265d-5416-4aab-b089-1b9a8356fb92-logs\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.614886 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") device mount path \"/mnt/openstack/pv14\"" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.615031 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-var-locks-brick\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.621917 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c0d265d-5416-4aab-b089-1b9a8356fb92-config-data\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.622912 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c0d265d-5416-4aab-b089-1b9a8356fb92-scripts\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.637564 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage18-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage18-crc\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.637543 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: 
I0128 12:50:31.640592 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xfbtq\" (UniqueName: \"kubernetes.io/projected/4c0d265d-5416-4aab-b089-1b9a8356fb92-kube-api-access-xfbtq\") pod \"glance-default-external-api-0\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.660395 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.661848 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.670471 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.670991 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-default-internal-config-data" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.732910 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.815574 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.815917 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.815946 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.815984 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-run\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.816015 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-sys\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.816044 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p56fq\" (UniqueName: 
\"kubernetes.io/projected/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-kube-api-access-p56fq\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.816096 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-dev\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.816123 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.816146 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-scripts\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.816206 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage19-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage19-crc\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.816331 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.816460 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-config-data\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.816489 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.816525 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-logs\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.917351 4685 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.917398 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.917413 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.917436 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-run\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.917458 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-sys\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.917477 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p56fq\" (UniqueName: \"kubernetes.io/projected/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-kube-api-access-p56fq\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.917521 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-dev\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.917549 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.917568 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-scripts\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.917593 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"local-storage19-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage19-crc\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.917607 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.917627 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-config-data\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.917643 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.917663 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-logs\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.918125 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-logs\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.918232 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.918259 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.918289 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.918309 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-run\") pod 
\"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.918328 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-sys\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.918571 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-dev\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.918769 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.919278 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.919565 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") device mount path \"/mnt/openstack/pv02\"" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.920042 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage19-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage19-crc\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") device mount path \"/mnt/openstack/pv19\"" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.931288 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-config-data\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.932790 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-scripts\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.936837 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p56fq\" (UniqueName: \"kubernetes.io/projected/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-kube-api-access-p56fq\") pod \"glance-default-internal-api-0\" (UID: 
\"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.970089 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.985207 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:50:31 crc kubenswrapper[4685]: W0128 12:50:31.987339 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c0d265d_5416_4aab_b089_1b9a8356fb92.slice/crio-bb1c2db715a93321bc7b02ae5a641ae05ff79c7f783729da4ec5345b3d3a03b7 WatchSource:0}: Error finding container bb1c2db715a93321bc7b02ae5a641ae05ff79c7f783729da4ec5345b3d3a03b7: Status 404 returned error can't find the container with id bb1c2db715a93321bc7b02ae5a641ae05ff79c7f783729da4ec5345b3d3a03b7 Jan 28 12:50:31 crc kubenswrapper[4685]: I0128 12:50:31.993878 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage19-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage19-crc\") pod \"glance-default-internal-api-0\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:32 crc kubenswrapper[4685]: I0128 12:50:32.000758 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:32 crc kubenswrapper[4685]: I0128 12:50:32.276305 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"4c0d265d-5416-4aab-b089-1b9a8356fb92","Type":"ContainerStarted","Data":"09fb36ad5dcdee08f92bdfde889e3281e2e8f95570f5b5d4bf8c6fe4df6a2ddd"} Jan 28 12:50:32 crc kubenswrapper[4685]: I0128 12:50:32.276549 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"4c0d265d-5416-4aab-b089-1b9a8356fb92","Type":"ContainerStarted","Data":"bb1c2db715a93321bc7b02ae5a641ae05ff79c7f783729da4ec5345b3d3a03b7"} Jan 28 12:50:32 crc kubenswrapper[4685]: I0128 12:50:32.463636 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:50:32 crc kubenswrapper[4685]: W0128 12:50:32.470030 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fa5cec4_cd9c_47ee_bc27_af0f9c8723dc.slice/crio-4c11d133f95986e7baabe2280442d9d0ff28d661d36ebe93fd4dfeec2dfe171f WatchSource:0}: Error finding container 4c11d133f95986e7baabe2280442d9d0ff28d661d36ebe93fd4dfeec2dfe171f: Status 404 returned error can't find the container with id 4c11d133f95986e7baabe2280442d9d0ff28d661d36ebe93fd4dfeec2dfe171f Jan 28 12:50:32 crc kubenswrapper[4685]: I0128 12:50:32.562709 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.287800 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" 
event={"ID":"4c0d265d-5416-4aab-b089-1b9a8356fb92","Type":"ContainerStarted","Data":"46e7ba47a37ad423a0a711c965eaf5123e70a498c0d0a3036d6b494dc036d880"} Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.289915 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc","Type":"ContainerStarted","Data":"98a225226e301495e5696dd7a7914656919e8c3bcf1aa08a0cec5a0a8a67a903"} Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.289961 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc","Type":"ContainerStarted","Data":"2cbc18da298b3f5f994aa16857cb8aed10244a6a624da3528913f5bd6bc19bf5"} Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.289976 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc","Type":"ContainerStarted","Data":"4c11d133f95986e7baabe2280442d9d0ff28d661d36ebe93fd4dfeec2dfe171f"} Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.290097 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-0" podUID="4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" containerName="glance-log" containerID="cri-o://2cbc18da298b3f5f994aa16857cb8aed10244a6a624da3528913f5bd6bc19bf5" gracePeriod=30 Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.290419 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-0" podUID="4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" containerName="glance-httpd" containerID="cri-o://98a225226e301495e5696dd7a7914656919e8c3bcf1aa08a0cec5a0a8a67a903" gracePeriod=30 Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.312712 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-external-api-0" podStartSLOduration=2.312694796 podStartE2EDuration="2.312694796s" podCreationTimestamp="2026-01-28 12:50:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:50:33.309472125 +0000 UTC m=+1784.396885960" watchObservedRunningTime="2026-01-28 12:50:33.312694796 +0000 UTC m=+1784.400108631" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.353532 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-internal-api-0" podStartSLOduration=3.35351233 podStartE2EDuration="3.35351233s" podCreationTimestamp="2026-01-28 12:50:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:50:33.345671428 +0000 UTC m=+1784.433085263" watchObservedRunningTime="2026-01-28 12:50:33.35351233 +0000 UTC m=+1784.440926165" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.744099 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.848912 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-var-locks-brick\") pod \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.848957 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-etc-nvme\") pod \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.848977 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage19-crc\") pod \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.849002 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-etc-iscsi\") pod \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.849046 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" (UID: "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.849075 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-sys\") pod \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.849091 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" (UID: "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.849104 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p56fq\" (UniqueName: \"kubernetes.io/projected/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-kube-api-access-p56fq\") pod \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.849114 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-sys" (OuterVolumeSpecName: "sys") pod "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" (UID: "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc"). InnerVolumeSpecName "sys". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.849160 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-lib-modules\") pod \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.849229 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-config-data\") pod \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.849284 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-dev\") pod \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.849316 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-httpd-run\") pod \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.849380 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-dev" (OuterVolumeSpecName: "dev") pod "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" (UID: "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.849377 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" (UID: "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc"). InnerVolumeSpecName "lib-modules". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.849430 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.849535 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-scripts\") pod \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.849566 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-logs\") pod \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.849929 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-run\") pod \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\" (UID: \"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc\") " Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.849598 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" (UID: "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.849604 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" (UID: "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.849767 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-logs" (OuterVolumeSpecName: "logs") pod "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" (UID: "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.850112 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-run" (OuterVolumeSpecName: "run") pod "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" (UID: "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc"). InnerVolumeSpecName "run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.850272 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.850286 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.850294 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.850302 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.850310 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.850318 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.850327 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.850336 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.850343 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.853891 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-kube-api-access-p56fq" (OuterVolumeSpecName: "kube-api-access-p56fq") pod "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" (UID: "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc"). InnerVolumeSpecName "kube-api-access-p56fq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.854091 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" (UID: "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.854211 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage19-crc" (OuterVolumeSpecName: "glance-cache") pod "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" (UID: "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc"). 
InnerVolumeSpecName "local-storage19-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.865265 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-scripts" (OuterVolumeSpecName: "scripts") pod "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" (UID: "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.882600 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-config-data" (OuterVolumeSpecName: "config-data") pod "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" (UID: "4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.951844 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.951904 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.951916 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.951933 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage19-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage19-crc\") on node \"crc\" " Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.951945 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p56fq\" (UniqueName: \"kubernetes.io/projected/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc-kube-api-access-p56fq\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.967490 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Jan 28 12:50:33 crc kubenswrapper[4685]: I0128 12:50:33.968005 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage19-crc" (UniqueName: "kubernetes.io/local-volume/local-storage19-crc") on node "crc" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.053098 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage19-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage19-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.053127 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.300012 4685 generic.go:334] "Generic (PLEG): container finished" podID="4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" containerID="98a225226e301495e5696dd7a7914656919e8c3bcf1aa08a0cec5a0a8a67a903" exitCode=143 Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.300050 4685 generic.go:334] "Generic (PLEG): 
container finished" podID="4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" containerID="2cbc18da298b3f5f994aa16857cb8aed10244a6a624da3528913f5bd6bc19bf5" exitCode=143 Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.300073 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.300112 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc","Type":"ContainerDied","Data":"98a225226e301495e5696dd7a7914656919e8c3bcf1aa08a0cec5a0a8a67a903"} Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.300188 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc","Type":"ContainerDied","Data":"2cbc18da298b3f5f994aa16857cb8aed10244a6a624da3528913f5bd6bc19bf5"} Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.300206 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc","Type":"ContainerDied","Data":"4c11d133f95986e7baabe2280442d9d0ff28d661d36ebe93fd4dfeec2dfe171f"} Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.300225 4685 scope.go:117] "RemoveContainer" containerID="98a225226e301495e5696dd7a7914656919e8c3bcf1aa08a0cec5a0a8a67a903" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.330236 4685 scope.go:117] "RemoveContainer" containerID="2cbc18da298b3f5f994aa16857cb8aed10244a6a624da3528913f5bd6bc19bf5" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.332569 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.341623 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.362631 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:50:34 crc kubenswrapper[4685]: E0128 12:50:34.362969 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" containerName="glance-log" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.362985 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" containerName="glance-log" Jan 28 12:50:34 crc kubenswrapper[4685]: E0128 12:50:34.363012 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" containerName="glance-httpd" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.363021 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" containerName="glance-httpd" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.363216 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" containerName="glance-log" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.363231 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" containerName="glance-httpd" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.363407 4685 scope.go:117] "RemoveContainer" containerID="98a225226e301495e5696dd7a7914656919e8c3bcf1aa08a0cec5a0a8a67a903" 
Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.364246 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: E0128 12:50:34.364638 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98a225226e301495e5696dd7a7914656919e8c3bcf1aa08a0cec5a0a8a67a903\": container with ID starting with 98a225226e301495e5696dd7a7914656919e8c3bcf1aa08a0cec5a0a8a67a903 not found: ID does not exist" containerID="98a225226e301495e5696dd7a7914656919e8c3bcf1aa08a0cec5a0a8a67a903" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.364674 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98a225226e301495e5696dd7a7914656919e8c3bcf1aa08a0cec5a0a8a67a903"} err="failed to get container status \"98a225226e301495e5696dd7a7914656919e8c3bcf1aa08a0cec5a0a8a67a903\": rpc error: code = NotFound desc = could not find container \"98a225226e301495e5696dd7a7914656919e8c3bcf1aa08a0cec5a0a8a67a903\": container with ID starting with 98a225226e301495e5696dd7a7914656919e8c3bcf1aa08a0cec5a0a8a67a903 not found: ID does not exist" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.364698 4685 scope.go:117] "RemoveContainer" containerID="2cbc18da298b3f5f994aa16857cb8aed10244a6a624da3528913f5bd6bc19bf5" Jan 28 12:50:34 crc kubenswrapper[4685]: E0128 12:50:34.365261 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2cbc18da298b3f5f994aa16857cb8aed10244a6a624da3528913f5bd6bc19bf5\": container with ID starting with 2cbc18da298b3f5f994aa16857cb8aed10244a6a624da3528913f5bd6bc19bf5 not found: ID does not exist" containerID="2cbc18da298b3f5f994aa16857cb8aed10244a6a624da3528913f5bd6bc19bf5" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.365300 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2cbc18da298b3f5f994aa16857cb8aed10244a6a624da3528913f5bd6bc19bf5"} err="failed to get container status \"2cbc18da298b3f5f994aa16857cb8aed10244a6a624da3528913f5bd6bc19bf5\": rpc error: code = NotFound desc = could not find container \"2cbc18da298b3f5f994aa16857cb8aed10244a6a624da3528913f5bd6bc19bf5\": container with ID starting with 2cbc18da298b3f5f994aa16857cb8aed10244a6a624da3528913f5bd6bc19bf5 not found: ID does not exist" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.365321 4685 scope.go:117] "RemoveContainer" containerID="98a225226e301495e5696dd7a7914656919e8c3bcf1aa08a0cec5a0a8a67a903" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.365689 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98a225226e301495e5696dd7a7914656919e8c3bcf1aa08a0cec5a0a8a67a903"} err="failed to get container status \"98a225226e301495e5696dd7a7914656919e8c3bcf1aa08a0cec5a0a8a67a903\": rpc error: code = NotFound desc = could not find container \"98a225226e301495e5696dd7a7914656919e8c3bcf1aa08a0cec5a0a8a67a903\": container with ID starting with 98a225226e301495e5696dd7a7914656919e8c3bcf1aa08a0cec5a0a8a67a903 not found: ID does not exist" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.365713 4685 scope.go:117] "RemoveContainer" containerID="2cbc18da298b3f5f994aa16857cb8aed10244a6a624da3528913f5bd6bc19bf5" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.366112 4685 pod_container_deletor.go:53] "DeleteContainer 
returned error" containerID={"Type":"cri-o","ID":"2cbc18da298b3f5f994aa16857cb8aed10244a6a624da3528913f5bd6bc19bf5"} err="failed to get container status \"2cbc18da298b3f5f994aa16857cb8aed10244a6a624da3528913f5bd6bc19bf5\": rpc error: code = NotFound desc = could not find container \"2cbc18da298b3f5f994aa16857cb8aed10244a6a624da3528913f5bd6bc19bf5\": container with ID starting with 2cbc18da298b3f5f994aa16857cb8aed10244a6a624da3528913f5bd6bc19bf5 not found: ID does not exist" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.368648 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-default-internal-config-data" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.394660 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.458728 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.458780 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77743e75-2a16-43c9-9d38-a38a69bda189-scripts\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.458808 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-sys\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.458886 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/77743e75-2a16-43c9-9d38-a38a69bda189-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.459050 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-dev\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.459157 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.459294 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-var-locks-brick\") pod 
\"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.459403 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.459445 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.459493 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77743e75-2a16-43c9-9d38-a38a69bda189-config-data\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.459529 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage19-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage19-crc\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.459568 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77743e75-2a16-43c9-9d38-a38a69bda189-logs\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.459610 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-run\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.459632 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kndj\" (UniqueName: \"kubernetes.io/projected/77743e75-2a16-43c9-9d38-a38a69bda189-kube-api-access-4kndj\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.546029 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:50:34 crc kubenswrapper[4685]: E0128 12:50:34.546322 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.558474 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc" path="/var/lib/kubelet/pods/4fa5cec4-cd9c-47ee-bc27-af0f9c8723dc/volumes" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561322 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561379 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561413 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561437 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77743e75-2a16-43c9-9d38-a38a69bda189-config-data\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561462 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage19-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage19-crc\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561483 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77743e75-2a16-43c9-9d38-a38a69bda189-logs\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561506 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-run\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561524 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kndj\" (UniqueName: \"kubernetes.io/projected/77743e75-2a16-43c9-9d38-a38a69bda189-kube-api-access-4kndj\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561545 
4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561562 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77743e75-2a16-43c9-9d38-a38a69bda189-scripts\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561561 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561608 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-sys\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561611 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage19-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage19-crc\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") device mount path \"/mnt/openstack/pv19\"" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561582 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-sys\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561488 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561707 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-run\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561739 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") device mount path \"/mnt/openstack/pv02\"" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561757 4685 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/77743e75-2a16-43c9-9d38-a38a69bda189-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561897 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-dev\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561968 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.561981 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77743e75-2a16-43c9-9d38-a38a69bda189-logs\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.562061 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-dev\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.562109 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.562231 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/77743e75-2a16-43c9-9d38-a38a69bda189-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.562306 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.566310 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77743e75-2a16-43c9-9d38-a38a69bda189-scripts\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.567677 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77743e75-2a16-43c9-9d38-a38a69bda189-config-data\") pod 
\"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.582341 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kndj\" (UniqueName: \"kubernetes.io/projected/77743e75-2a16-43c9-9d38-a38a69bda189-kube-api-access-4kndj\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.585632 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage19-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage19-crc\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.604340 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:34 crc kubenswrapper[4685]: I0128 12:50:34.683769 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:35 crc kubenswrapper[4685]: I0128 12:50:35.099059 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:50:35 crc kubenswrapper[4685]: I0128 12:50:35.313370 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"77743e75-2a16-43c9-9d38-a38a69bda189","Type":"ContainerStarted","Data":"9886f3be435b86f11e3fb0ba6aa81563d78d81872cbf05b6283a2df24f9af9fd"} Jan 28 12:50:35 crc kubenswrapper[4685]: I0128 12:50:35.313406 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"77743e75-2a16-43c9-9d38-a38a69bda189","Type":"ContainerStarted","Data":"bd7a7958704853c4b0ce2f5092fdd457b32a83f4890fb207edd0509582ea0b94"} Jan 28 12:50:36 crc kubenswrapper[4685]: I0128 12:50:36.321727 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"77743e75-2a16-43c9-9d38-a38a69bda189","Type":"ContainerStarted","Data":"1d0f87b7c9e13938d9827df9d7e56a36f908d01d8d71150375bb2a13da302ade"} Jan 28 12:50:36 crc kubenswrapper[4685]: I0128 12:50:36.352411 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-internal-api-0" podStartSLOduration=2.352387047 podStartE2EDuration="2.352387047s" podCreationTimestamp="2026-01-28 12:50:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:50:36.343303691 +0000 UTC m=+1787.430717606" watchObservedRunningTime="2026-01-28 12:50:36.352387047 +0000 UTC m=+1787.439800882" Jan 28 12:50:36 crc kubenswrapper[4685]: I0128 12:50:36.601131 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qzkrj" Jan 28 12:50:36 crc kubenswrapper[4685]: I0128 12:50:36.601200 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/certified-operators-qzkrj" Jan 28 12:50:36 crc kubenswrapper[4685]: I0128 12:50:36.720758 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qzkrj" Jan 28 12:50:37 crc kubenswrapper[4685]: I0128 12:50:37.372895 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qzkrj" Jan 28 12:50:37 crc kubenswrapper[4685]: I0128 12:50:37.421063 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qzkrj"] Jan 28 12:50:39 crc kubenswrapper[4685]: I0128 12:50:39.345130 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qzkrj" podUID="a39d59dd-42b2-4da5-b205-12db78b4c405" containerName="registry-server" containerID="cri-o://b32063023e2973bc74e9d74e51923898c5b801347892752177c0143412169456" gracePeriod=2 Jan 28 12:50:39 crc kubenswrapper[4685]: I0128 12:50:39.857971 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qzkrj" Jan 28 12:50:39 crc kubenswrapper[4685]: I0128 12:50:39.942621 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a39d59dd-42b2-4da5-b205-12db78b4c405-utilities\") pod \"a39d59dd-42b2-4da5-b205-12db78b4c405\" (UID: \"a39d59dd-42b2-4da5-b205-12db78b4c405\") " Jan 28 12:50:39 crc kubenswrapper[4685]: I0128 12:50:39.942735 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-42mkm\" (UniqueName: \"kubernetes.io/projected/a39d59dd-42b2-4da5-b205-12db78b4c405-kube-api-access-42mkm\") pod \"a39d59dd-42b2-4da5-b205-12db78b4c405\" (UID: \"a39d59dd-42b2-4da5-b205-12db78b4c405\") " Jan 28 12:50:39 crc kubenswrapper[4685]: I0128 12:50:39.942828 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a39d59dd-42b2-4da5-b205-12db78b4c405-catalog-content\") pod \"a39d59dd-42b2-4da5-b205-12db78b4c405\" (UID: \"a39d59dd-42b2-4da5-b205-12db78b4c405\") " Jan 28 12:50:39 crc kubenswrapper[4685]: I0128 12:50:39.944377 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a39d59dd-42b2-4da5-b205-12db78b4c405-utilities" (OuterVolumeSpecName: "utilities") pod "a39d59dd-42b2-4da5-b205-12db78b4c405" (UID: "a39d59dd-42b2-4da5-b205-12db78b4c405"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:50:39 crc kubenswrapper[4685]: I0128 12:50:39.958453 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a39d59dd-42b2-4da5-b205-12db78b4c405-kube-api-access-42mkm" (OuterVolumeSpecName: "kube-api-access-42mkm") pod "a39d59dd-42b2-4da5-b205-12db78b4c405" (UID: "a39d59dd-42b2-4da5-b205-12db78b4c405"). InnerVolumeSpecName "kube-api-access-42mkm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:50:40 crc kubenswrapper[4685]: I0128 12:50:40.044134 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a39d59dd-42b2-4da5-b205-12db78b4c405-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:40 crc kubenswrapper[4685]: I0128 12:50:40.044186 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-42mkm\" (UniqueName: \"kubernetes.io/projected/a39d59dd-42b2-4da5-b205-12db78b4c405-kube-api-access-42mkm\") on node \"crc\" DevicePath \"\"" Jan 28 12:50:40 crc kubenswrapper[4685]: I0128 12:50:40.359832 4685 generic.go:334] "Generic (PLEG): container finished" podID="a39d59dd-42b2-4da5-b205-12db78b4c405" containerID="b32063023e2973bc74e9d74e51923898c5b801347892752177c0143412169456" exitCode=0 Jan 28 12:50:40 crc kubenswrapper[4685]: I0128 12:50:40.359873 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qzkrj" event={"ID":"a39d59dd-42b2-4da5-b205-12db78b4c405","Type":"ContainerDied","Data":"b32063023e2973bc74e9d74e51923898c5b801347892752177c0143412169456"} Jan 28 12:50:40 crc kubenswrapper[4685]: I0128 12:50:40.360133 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qzkrj" event={"ID":"a39d59dd-42b2-4da5-b205-12db78b4c405","Type":"ContainerDied","Data":"79b3434146d9078482b0591a5de70567f3330d9f17af798acb302b1f4f5e107c"} Jan 28 12:50:40 crc kubenswrapper[4685]: I0128 12:50:40.360154 4685 scope.go:117] "RemoveContainer" containerID="b32063023e2973bc74e9d74e51923898c5b801347892752177c0143412169456" Jan 28 12:50:40 crc kubenswrapper[4685]: I0128 12:50:40.359989 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qzkrj" Jan 28 12:50:40 crc kubenswrapper[4685]: I0128 12:50:40.383874 4685 scope.go:117] "RemoveContainer" containerID="b443a6a9b08d7aef1a215c2ff34b33a81bebb0057ee3a6b12fc6d52590cc8934" Jan 28 12:50:40 crc kubenswrapper[4685]: I0128 12:50:40.413372 4685 scope.go:117] "RemoveContainer" containerID="e43ff71a9fbaaae5b48d36d63002804e2bca8c4ea429027f539a07e7d6907e9f" Jan 28 12:50:40 crc kubenswrapper[4685]: I0128 12:50:40.449071 4685 scope.go:117] "RemoveContainer" containerID="b32063023e2973bc74e9d74e51923898c5b801347892752177c0143412169456" Jan 28 12:50:40 crc kubenswrapper[4685]: E0128 12:50:40.449634 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b32063023e2973bc74e9d74e51923898c5b801347892752177c0143412169456\": container with ID starting with b32063023e2973bc74e9d74e51923898c5b801347892752177c0143412169456 not found: ID does not exist" containerID="b32063023e2973bc74e9d74e51923898c5b801347892752177c0143412169456" Jan 28 12:50:40 crc kubenswrapper[4685]: I0128 12:50:40.449679 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b32063023e2973bc74e9d74e51923898c5b801347892752177c0143412169456"} err="failed to get container status \"b32063023e2973bc74e9d74e51923898c5b801347892752177c0143412169456\": rpc error: code = NotFound desc = could not find container \"b32063023e2973bc74e9d74e51923898c5b801347892752177c0143412169456\": container with ID starting with b32063023e2973bc74e9d74e51923898c5b801347892752177c0143412169456 not found: ID does not exist" Jan 28 12:50:40 crc kubenswrapper[4685]: I0128 12:50:40.449705 4685 scope.go:117] "RemoveContainer" containerID="b443a6a9b08d7aef1a215c2ff34b33a81bebb0057ee3a6b12fc6d52590cc8934" Jan 28 12:50:40 crc kubenswrapper[4685]: E0128 12:50:40.450095 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b443a6a9b08d7aef1a215c2ff34b33a81bebb0057ee3a6b12fc6d52590cc8934\": container with ID starting with b443a6a9b08d7aef1a215c2ff34b33a81bebb0057ee3a6b12fc6d52590cc8934 not found: ID does not exist" containerID="b443a6a9b08d7aef1a215c2ff34b33a81bebb0057ee3a6b12fc6d52590cc8934" Jan 28 12:50:40 crc kubenswrapper[4685]: I0128 12:50:40.450134 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b443a6a9b08d7aef1a215c2ff34b33a81bebb0057ee3a6b12fc6d52590cc8934"} err="failed to get container status \"b443a6a9b08d7aef1a215c2ff34b33a81bebb0057ee3a6b12fc6d52590cc8934\": rpc error: code = NotFound desc = could not find container \"b443a6a9b08d7aef1a215c2ff34b33a81bebb0057ee3a6b12fc6d52590cc8934\": container with ID starting with b443a6a9b08d7aef1a215c2ff34b33a81bebb0057ee3a6b12fc6d52590cc8934 not found: ID does not exist" Jan 28 12:50:40 crc kubenswrapper[4685]: I0128 12:50:40.450150 4685 scope.go:117] "RemoveContainer" containerID="e43ff71a9fbaaae5b48d36d63002804e2bca8c4ea429027f539a07e7d6907e9f" Jan 28 12:50:40 crc kubenswrapper[4685]: E0128 12:50:40.451403 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e43ff71a9fbaaae5b48d36d63002804e2bca8c4ea429027f539a07e7d6907e9f\": container with ID starting with e43ff71a9fbaaae5b48d36d63002804e2bca8c4ea429027f539a07e7d6907e9f not found: ID does not exist" containerID="e43ff71a9fbaaae5b48d36d63002804e2bca8c4ea429027f539a07e7d6907e9f" 
Jan 28 12:50:40 crc kubenswrapper[4685]: I0128 12:50:40.451425 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e43ff71a9fbaaae5b48d36d63002804e2bca8c4ea429027f539a07e7d6907e9f"} err="failed to get container status \"e43ff71a9fbaaae5b48d36d63002804e2bca8c4ea429027f539a07e7d6907e9f\": rpc error: code = NotFound desc = could not find container \"e43ff71a9fbaaae5b48d36d63002804e2bca8c4ea429027f539a07e7d6907e9f\": container with ID starting with e43ff71a9fbaaae5b48d36d63002804e2bca8c4ea429027f539a07e7d6907e9f not found: ID does not exist"
Jan 28 12:50:41 crc kubenswrapper[4685]: I0128 12:50:41.062774 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a39d59dd-42b2-4da5-b205-12db78b4c405-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a39d59dd-42b2-4da5-b205-12db78b4c405" (UID: "a39d59dd-42b2-4da5-b205-12db78b4c405"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 12:50:41 crc kubenswrapper[4685]: I0128 12:50:41.063863 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a39d59dd-42b2-4da5-b205-12db78b4c405-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 12:50:41 crc kubenswrapper[4685]: I0128 12:50:41.288942 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qzkrj"]
Jan 28 12:50:41 crc kubenswrapper[4685]: I0128 12:50:41.295100 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qzkrj"]
Jan 28 12:50:41 crc kubenswrapper[4685]: I0128 12:50:41.733695 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-0"
Jan 28 12:50:41 crc kubenswrapper[4685]: I0128 12:50:41.733773 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-0"
Jan 28 12:50:41 crc kubenswrapper[4685]: I0128 12:50:41.760687 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-0"
Jan 28 12:50:41 crc kubenswrapper[4685]: I0128 12:50:41.771012 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-0"
Jan 28 12:50:42 crc kubenswrapper[4685]: I0128 12:50:42.379001 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-0"
Jan 28 12:50:42 crc kubenswrapper[4685]: I0128 12:50:42.379364 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-0"
Jan 28 12:50:42 crc kubenswrapper[4685]: I0128 12:50:42.556066 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a39d59dd-42b2-4da5-b205-12db78b4c405" path="/var/lib/kubelet/pods/a39d59dd-42b2-4da5-b205-12db78b4c405/volumes"
Jan 28 12:50:44 crc kubenswrapper[4685]: I0128 12:50:44.390552 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-0"
Jan 28 12:50:44 crc kubenswrapper[4685]: I0128 12:50:44.391227 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-0"
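The probe records for glance-default-external-api-0 above walk the usual startup-to-readiness handoff: the startup probe reports unhealthy, then started, and only after that is readiness evaluated, first as the empty (unknown) status and finally as ready. A toy state machine showing that gating, with illustrative types rather than the kubelet's prober:

package main

import "fmt"

// podProbes: readiness is only evaluated once startup has succeeded,
// matching the ordering of the SyncLoop (probe) records above.
type podProbes struct {
	started bool   // result of the startup probe
	ready   string // "" (unknown) or "ready"
}

func (p *podProbes) tick(startupOK, readinessOK bool) string {
	if !p.started {
		if !startupOK {
			return `probe="startup" status="unhealthy"`
		}
		p.started = true
		return `probe="startup" status="started"`
	}
	if readinessOK {
		p.ready = "ready"
	}
	return fmt.Sprintf(`probe="readiness" status=%q`, p.ready)
}

func main() {
	p := &podProbes{}
	fmt.Println(p.tick(false, false)) // startup unhealthy
	fmt.Println(p.tick(true, false))  // startup started
	fmt.Println(p.tick(true, false))  // readiness "" (not yet known)
	fmt.Println(p.tick(true, true))   // readiness "ready"
}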
status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:44 crc kubenswrapper[4685]: I0128 12:50:44.684710 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:44 crc kubenswrapper[4685]: I0128 12:50:44.709450 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:44 crc kubenswrapper[4685]: I0128 12:50:44.730974 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:45 crc kubenswrapper[4685]: I0128 12:50:45.403362 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:45 crc kubenswrapper[4685]: I0128 12:50:45.403448 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:45 crc kubenswrapper[4685]: I0128 12:50:45.546136 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:50:45 crc kubenswrapper[4685]: E0128 12:50:45.546363 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:50:47 crc kubenswrapper[4685]: I0128 12:50:47.521489 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:47 crc kubenswrapper[4685]: I0128 12:50:47.522527 4685 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:50:47 crc kubenswrapper[4685]: I0128 12:50:47.584223 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:50:50 crc kubenswrapper[4685]: I0128 12:50:50.828857 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-external-api-1"] Jan 28 12:50:50 crc kubenswrapper[4685]: E0128 12:50:50.829643 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a39d59dd-42b2-4da5-b205-12db78b4c405" containerName="extract-content" Jan 28 12:50:50 crc kubenswrapper[4685]: I0128 12:50:50.829662 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="a39d59dd-42b2-4da5-b205-12db78b4c405" containerName="extract-content" Jan 28 12:50:50 crc kubenswrapper[4685]: E0128 12:50:50.829678 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a39d59dd-42b2-4da5-b205-12db78b4c405" containerName="registry-server" Jan 28 12:50:50 crc kubenswrapper[4685]: I0128 12:50:50.829688 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="a39d59dd-42b2-4da5-b205-12db78b4c405" containerName="registry-server" Jan 28 12:50:50 crc kubenswrapper[4685]: E0128 12:50:50.829710 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a39d59dd-42b2-4da5-b205-12db78b4c405" containerName="extract-utilities" Jan 28 12:50:50 crc kubenswrapper[4685]: I0128 12:50:50.829719 4685 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="a39d59dd-42b2-4da5-b205-12db78b4c405" containerName="extract-utilities" Jan 28 12:50:50 crc kubenswrapper[4685]: I0128 12:50:50.829892 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="a39d59dd-42b2-4da5-b205-12db78b4c405" containerName="registry-server" Jan 28 12:50:50 crc kubenswrapper[4685]: I0128 12:50:50.831357 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:50 crc kubenswrapper[4685]: I0128 12:50:50.840769 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-external-api-2"] Jan 28 12:50:50 crc kubenswrapper[4685]: I0128 12:50:50.843406 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:50 crc kubenswrapper[4685]: I0128 12:50:50.862204 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-1"] Jan 28 12:50:50 crc kubenswrapper[4685]: I0128 12:50:50.871264 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-2"] Jan 28 12:50:50 crc kubenswrapper[4685]: I0128 12:50:50.972841 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-2"] Jan 28 12:50:50 crc kubenswrapper[4685]: I0128 12:50:50.975125 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:50 crc kubenswrapper[4685]: I0128 12:50:50.981303 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:50:50 crc kubenswrapper[4685]: I0128 12:50:50.982614 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:50 crc kubenswrapper[4685]: I0128 12:50:50.995325 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-2"] Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.001905 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.011911 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rklxs\" (UniqueName: \"kubernetes.io/projected/180dae19-d035-4741-b0e2-9eeff4224e07-kube-api-access-rklxs\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.011941 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5k848\" (UniqueName: \"kubernetes.io/projected/f3edf321-8f65-446f-876c-30420115c2a2-kube-api-access-5k848\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.011964 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-var-locks-brick\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.011994 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/180dae19-d035-4741-b0e2-9eeff4224e07-logs\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012026 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/180dae19-d035-4741-b0e2-9eeff4224e07-scripts\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012046 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/180dae19-d035-4741-b0e2-9eeff4224e07-config-data\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012064 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012079 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: 
\"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-sys\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012096 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-dev\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012116 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012140 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-etc-nvme\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012156 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012185 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-lib-modules\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012205 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-etc-iscsi\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012223 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-dev\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012239 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012257 4685 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/180dae19-d035-4741-b0e2-9eeff4224e07-httpd-run\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012275 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-lib-modules\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012292 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3edf321-8f65-446f-876c-30420115c2a2-logs\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012308 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3edf321-8f65-446f-876c-30420115c2a2-config-data\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012328 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-var-locks-brick\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012344 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-run\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012357 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-sys\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012379 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-etc-iscsi\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012394 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f3edf321-8f65-446f-876c-30420115c2a2-httpd-run\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " 
pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012412 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-run\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012431 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3edf321-8f65-446f-876c-30420115c2a2-scripts\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.012447 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-etc-nvme\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114336 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-etc-nvme\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114389 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114408 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a83d5fc7-63c1-40f5-827c-54ee37462203-httpd-run\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114425 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-dev\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114442 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-lib-modules\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114466 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-etc-nvme\") pod 
\"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114522 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114587 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-lib-modules\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114607 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a83d5fc7-63c1-40f5-827c-54ee37462203-logs\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114628 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-etc-iscsi\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114641 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a83d5fc7-63c1-40f5-827c-54ee37462203-scripts\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114663 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-dev\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114679 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114696 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgdxx\" (UniqueName: \"kubernetes.io/projected/728779c7-10ec-437a-a7c5-1ef8b27fa410-kube-api-access-rgdxx\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114715 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/180dae19-d035-4741-b0e2-9eeff4224e07-httpd-run\") pod 
\"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114731 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-lib-modules\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114747 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3edf321-8f65-446f-876c-30420115c2a2-logs\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114762 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3edf321-8f65-446f-876c-30420115c2a2-config-data\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114779 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114797 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-var-locks-brick\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114814 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/728779c7-10ec-437a-a7c5-1ef8b27fa410-httpd-run\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114831 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-run\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114834 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") device mount path \"/mnt/openstack/pv05\"" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114846 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-sys\") pod 
\"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114878 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-etc-nvme\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114930 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-var-locks-brick\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.114957 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-run\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.115223 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") device mount path \"/mnt/openstack/pv20\"" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.115251 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-lib-modules\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.115272 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-etc-iscsi\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.115298 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-dev\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.115316 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-lib-modules\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.115333 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-sys\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " 
pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.115641 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/180dae19-d035-4741-b0e2-9eeff4224e07-httpd-run\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116112 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3edf321-8f65-446f-876c-30420115c2a2-logs\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116328 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-var-locks-brick\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116377 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/728779c7-10ec-437a-a7c5-1ef8b27fa410-config-data\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116411 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-etc-iscsi\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116434 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f3edf321-8f65-446f-876c-30420115c2a2-httpd-run\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116467 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-var-locks-brick\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116493 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-etc-iscsi\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116513 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-2\" (UID: 
\"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116539 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-run\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116548 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-etc-iscsi\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116612 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3edf321-8f65-446f-876c-30420115c2a2-scripts\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116625 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-run\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116731 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-etc-nvme\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116779 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-etc-nvme\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116810 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-run\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116836 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/728779c7-10ec-437a-a7c5-1ef8b27fa410-logs\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116856 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rklxs\" (UniqueName: \"kubernetes.io/projected/180dae19-d035-4741-b0e2-9eeff4224e07-kube-api-access-rklxs\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " 
pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116874 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5k848\" (UniqueName: \"kubernetes.io/projected/f3edf321-8f65-446f-876c-30420115c2a2-kube-api-access-5k848\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116891 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-var-locks-brick\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116906 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/180dae19-d035-4741-b0e2-9eeff4224e07-logs\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116920 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-sys\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116945 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-run\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116958 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-sys\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116981 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/180dae19-d035-4741-b0e2-9eeff4224e07-scripts\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.116999 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/728779c7-10ec-437a-a7c5-1ef8b27fa410-scripts\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.117014 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-etc-iscsi\") pod \"glance-default-internal-api-2\" (UID: 
\"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.117035 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-etc-nvme\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.117053 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/180dae19-d035-4741-b0e2-9eeff4224e07-config-data\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.117077 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.117092 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a83d5fc7-63c1-40f5-827c-54ee37462203-config-data\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.117110 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-sys\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.117131 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxq7g\" (UniqueName: \"kubernetes.io/projected/a83d5fc7-63c1-40f5-827c-54ee37462203-kube-api-access-bxq7g\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.117130 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f3edf321-8f65-446f-876c-30420115c2a2-httpd-run\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.117150 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-dev\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.117254 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/180dae19-d035-4741-b0e2-9eeff4224e07-logs\") pod \"glance-default-external-api-1\" (UID: 
\"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.117225 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-dev\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.117523 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-var-locks-brick\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.117562 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-dev\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.117650 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-sys\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.117730 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") device mount path \"/mnt/openstack/pv17\"" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.117774 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-lib-modules\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.117849 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.118186 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.118284 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") device 
mount path \"/mnt/openstack/pv11\"" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.124427 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/180dae19-d035-4741-b0e2-9eeff4224e07-scripts\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.124642 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/180dae19-d035-4741-b0e2-9eeff4224e07-config-data\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.137350 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3edf321-8f65-446f-876c-30420115c2a2-scripts\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.140348 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3edf321-8f65-446f-876c-30420115c2a2-config-data\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.141503 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rklxs\" (UniqueName: \"kubernetes.io/projected/180dae19-d035-4741-b0e2-9eeff4224e07-kube-api-access-rklxs\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.150083 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5k848\" (UniqueName: \"kubernetes.io/projected/f3edf321-8f65-446f-876c-30420115c2a2-kube-api-access-5k848\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.152476 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.163746 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.164006 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-external-api-1\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:50:51 crc 
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.191587 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " pod="glance-kuttl-tests/glance-default-external-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.219303 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-run\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.219581 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/728779c7-10ec-437a-a7c5-1ef8b27fa410-logs\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.219741 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-sys\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.219949 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-run\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.220103 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-sys\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.219440 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-run\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.220011 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-run\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.220302 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/728779c7-10ec-437a-a7c5-1ef8b27fa410-scripts\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.220181 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-sys\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.220476 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-etc-iscsi\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.220564 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-etc-nvme\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.219872 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-sys\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.220578 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-etc-iscsi\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.220207 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/728779c7-10ec-437a-a7c5-1ef8b27fa410-logs\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.220665 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a83d5fc7-63c1-40f5-827c-54ee37462203-config-data\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.220717 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxq7g\" (UniqueName: \"kubernetes.io/projected/a83d5fc7-63c1-40f5-827c-54ee37462203-kube-api-access-bxq7g\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.220730 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-etc-nvme\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.220760 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-dev\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.220794 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-lib-modules\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.220825 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.220875 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-etc-nvme\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.220911 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-dev\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.220928 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.221014 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a83d5fc7-63c1-40f5-827c-54ee37462203-httpd-run\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.220925 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-lib-modules\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.221046 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-dev\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.221059 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") device mount path \"/mnt/openstack/pv10\"" pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.221075 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-lib-modules\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.221076 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-etc-nvme\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.221118 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") device mount path \"/mnt/openstack/pv07\"" pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.221486 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a83d5fc7-63c1-40f5-827c-54ee37462203-httpd-run\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.221526 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-dev\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.221534 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-lib-modules\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.221129 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a83d5fc7-63c1-40f5-827c-54ee37462203-logs\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.221621 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a83d5fc7-63c1-40f5-827c-54ee37462203-scripts\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.221700 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgdxx\" (UniqueName: \"kubernetes.io/projected/728779c7-10ec-437a-a7c5-1ef8b27fa410-kube-api-access-rgdxx\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.221751 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.222242 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") device mount path \"/mnt/openstack/pv04\"" pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.222256 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/728779c7-10ec-437a-a7c5-1ef8b27fa410-httpd-run\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.222318 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-var-locks-brick\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.222422 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/728779c7-10ec-437a-a7c5-1ef8b27fa410-config-data\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.222467 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-var-locks-brick\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.222501 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-etc-iscsi\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.222532 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.221628 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a83d5fc7-63c1-40f5-827c-54ee37462203-logs\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.222796 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") device mount path \"/mnt/openstack/pv12\"" pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.222934 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-var-locks-brick\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.223013 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-etc-iscsi\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.223206 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-var-locks-brick\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.223351 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/728779c7-10ec-437a-a7c5-1ef8b27fa410-httpd-run\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.227708 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a83d5fc7-63c1-40f5-827c-54ee37462203-scripts\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.228647 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a83d5fc7-63c1-40f5-827c-54ee37462203-config-data\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.233659 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/728779c7-10ec-437a-a7c5-1ef8b27fa410-scripts\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.235607 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/728779c7-10ec-437a-a7c5-1ef8b27fa410-config-data\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.244786 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgdxx\" (UniqueName: \"kubernetes.io/projected/728779c7-10ec-437a-a7c5-1ef8b27fa410-kube-api-access-rgdxx\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.252334 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxq7g\" (UniqueName: \"kubernetes.io/projected/a83d5fc7-63c1-40f5-827c-54ee37462203-kube-api-access-bxq7g\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.255971 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.261387 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-1\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.264009 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.282921 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-2\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.302672 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.314504 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.454814 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-1"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.465377 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-2"
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.834105 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-2"]
Jan 28 12:50:51 crc kubenswrapper[4685]: W0128 12:50:51.839905 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod728779c7_10ec_437a_a7c5_1ef8b27fa410.slice/crio-b95efa6ec10cee60894d03052e2b3c632b1f43082a7acc3d7f78477325734a17 WatchSource:0}: Error finding container b95efa6ec10cee60894d03052e2b3c632b1f43082a7acc3d7f78477325734a17: Status 404 returned error can't find the container with id b95efa6ec10cee60894d03052e2b3c632b1f43082a7acc3d7f78477325734a17
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.914140 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"]
Jan 28 12:50:51 crc kubenswrapper[4685]: I0128 12:50:51.970607 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-1"]
Jan 28 12:50:52 crc kubenswrapper[4685]: I0128 12:50:52.017091 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-2"]
Jan 28 12:50:52 crc kubenswrapper[4685]: W0128 12:50:52.030227 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf3edf321_8f65_446f_876c_30420115c2a2.slice/crio-124e2b65c31244fba8323e160a94506940c04e3cb591c30396b4534278ce37c9 WatchSource:0}: Error finding container 124e2b65c31244fba8323e160a94506940c04e3cb591c30396b4534278ce37c9: Status 404 returned error can't find the container with id 124e2b65c31244fba8323e160a94506940c04e3cb591c30396b4534278ce37c9
Jan 28 12:50:52 crc kubenswrapper[4685]: I0128 12:50:52.465126 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-1" event={"ID":"180dae19-d035-4741-b0e2-9eeff4224e07","Type":"ContainerStarted","Data":"d780e3f653d82aa184f0f1d229ea4bdf5663c481bb2e01dc9dda590a13888631"}
Jan 28 12:50:52 crc kubenswrapper[4685]: I0128 12:50:52.465463 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-1" event={"ID":"180dae19-d035-4741-b0e2-9eeff4224e07","Type":"ContainerStarted","Data":"b03fa9660f80bea478a9752b3b3f8e5ad38fd50511f3edfeaba000c9d4b1cfc6"}
Jan 28 12:50:52 crc kubenswrapper[4685]: I0128 12:50:52.468424 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-2" event={"ID":"728779c7-10ec-437a-a7c5-1ef8b27fa410","Type":"ContainerStarted","Data":"fcf03e9a05b79483f749da0cb0bf4acfd26da8d0b766503190a4a1c352a4b2f1"}
Jan 28 12:50:52 crc kubenswrapper[4685]: I0128 12:50:52.468689 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-2" event={"ID":"728779c7-10ec-437a-a7c5-1ef8b27fa410","Type":"ContainerStarted","Data":"b95efa6ec10cee60894d03052e2b3c632b1f43082a7acc3d7f78477325734a17"}
Jan 28 12:50:52 crc kubenswrapper[4685]: I0128 12:50:52.471016 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-2" event={"ID":"f3edf321-8f65-446f-876c-30420115c2a2","Type":"ContainerStarted","Data":"dceedf8fe963926559d668e21f944b847fdf72ca1781a4acf730e7709e791776"}
Jan 28 12:50:52 crc kubenswrapper[4685]: I0128 12:50:52.471047 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-2" event={"ID":"f3edf321-8f65-446f-876c-30420115c2a2","Type":"ContainerStarted","Data":"124e2b65c31244fba8323e160a94506940c04e3cb591c30396b4534278ce37c9"}
Jan 28 12:50:52 crc kubenswrapper[4685]: I0128 12:50:52.472817 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"a83d5fc7-63c1-40f5-827c-54ee37462203","Type":"ContainerStarted","Data":"a96cbe9d8a58e4ea92017d1e508c6421ac7778d67d74358efa5cf8439e13f03c"}
Jan 28 12:50:52 crc kubenswrapper[4685]: I0128 12:50:52.472844 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"a83d5fc7-63c1-40f5-827c-54ee37462203","Type":"ContainerStarted","Data":"08579835f2307cd16f918f6cb9d719ebff25c7571a8e6224114cf4f67e0c01d5"}
Jan 28 12:50:53 crc kubenswrapper[4685]: I0128 12:50:53.480906 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-2" event={"ID":"728779c7-10ec-437a-a7c5-1ef8b27fa410","Type":"ContainerStarted","Data":"56d7751998c58327013b7706a8a7b726f85f870358a2a4ffd98d23dc6e1698b1"}
Jan 28 12:50:53 crc kubenswrapper[4685]: I0128 12:50:53.482541 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"a83d5fc7-63c1-40f5-827c-54ee37462203","Type":"ContainerStarted","Data":"ae10b8c59713ae699f0fc9b1120564d9a414f922198f92b69a4df4b75e02e4ae"}
Jan 28 12:50:53 crc kubenswrapper[4685]: I0128 12:50:53.485460 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-1" event={"ID":"180dae19-d035-4741-b0e2-9eeff4224e07","Type":"ContainerStarted","Data":"960f866769de4e206a65af40c58c86c14eacd0977a5990b718f8b812eb19d84e"}
Jan 28 12:50:53 crc kubenswrapper[4685]: I0128 12:50:53.488154 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-2" event={"ID":"f3edf321-8f65-446f-876c-30420115c2a2","Type":"ContainerStarted","Data":"0a16cd863c76b6716670c0cf89d76f1fc57e3f42c869f206cbd8a84151d98ca9"}
Jan 28 12:50:53 crc kubenswrapper[4685]: I0128 12:50:53.511055 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-internal-api-2" podStartSLOduration=4.511033331 podStartE2EDuration="4.511033331s" podCreationTimestamp="2026-01-28 12:50:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:50:53.509159178 +0000 UTC m=+1804.596573063" watchObservedRunningTime="2026-01-28 12:50:53.511033331 +0000 UTC m=+1804.598447176"
Jan 28 12:50:53 crc kubenswrapper[4685]: I0128 12:50:53.533711 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-external-api-1" podStartSLOduration=4.533688891 podStartE2EDuration="4.533688891s" podCreationTimestamp="2026-01-28 12:50:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:50:53.529471862 +0000 UTC m=+1804.616885697" watchObservedRunningTime="2026-01-28 12:50:53.533688891 +0000 UTC m=+1804.621102726"
pod="glance-kuttl-tests/glance-default-internal-api-1" podStartSLOduration=4.553333917 podStartE2EDuration="4.553333917s" podCreationTimestamp="2026-01-28 12:50:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:50:53.551069593 +0000 UTC m=+1804.638483448" watchObservedRunningTime="2026-01-28 12:50:53.553333917 +0000 UTC m=+1804.640747752" Jan 28 12:50:59 crc kubenswrapper[4685]: I0128 12:50:59.546345 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:50:59 crc kubenswrapper[4685]: E0128 12:50:59.547205 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.303551 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.303601 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.315959 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.316001 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.328810 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.345626 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.345986 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.353115 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-external-api-2" podStartSLOduration=12.353100036 podStartE2EDuration="12.353100036s" podCreationTimestamp="2026-01-28 12:50:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:50:53.573757744 +0000 UTC m=+1804.661171589" watchObservedRunningTime="2026-01-28 12:51:01.353100036 +0000 UTC m=+1812.440513871" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.361004 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.455883 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.455951 4685 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.466160 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.466236 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.482907 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.492231 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.499152 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.514777 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.546074 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.546110 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.546120 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.546130 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.546141 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.546150 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.546159 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:51:01 crc kubenswrapper[4685]: I0128 12:51:01.546168 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:51:03 crc kubenswrapper[4685]: I0128 12:51:03.558341 4685 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:51:03 crc kubenswrapper[4685]: I0128 12:51:03.558381 4685 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:51:03 crc kubenswrapper[4685]: I0128 12:51:03.558393 4685 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:51:03 crc kubenswrapper[4685]: I0128 12:51:03.558421 4685 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:51:03 crc kubenswrapper[4685]: I0128 12:51:03.558341 4685 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:51:03 crc kubenswrapper[4685]: I0128 12:51:03.558473 4685 prober_manager.go:312] "Failed to 
trigger a manual run" probe="Readiness" Jan 28 12:51:03 crc kubenswrapper[4685]: I0128 12:51:03.603540 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:51:03 crc kubenswrapper[4685]: I0128 12:51:03.696736 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:51:03 crc kubenswrapper[4685]: I0128 12:51:03.782602 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:51:03 crc kubenswrapper[4685]: I0128 12:51:03.782720 4685 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:51:03 crc kubenswrapper[4685]: I0128 12:51:03.784705 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:51:03 crc kubenswrapper[4685]: I0128 12:51:03.802339 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:51:03 crc kubenswrapper[4685]: I0128 12:51:03.957066 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:51:03 crc kubenswrapper[4685]: I0128 12:51:03.961758 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:51:04 crc kubenswrapper[4685]: I0128 12:51:04.011621 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:51:04 crc kubenswrapper[4685]: I0128 12:51:04.487810 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-2"] Jan 28 12:51:04 crc kubenswrapper[4685]: I0128 12:51:04.497317 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-1"] Jan 28 12:51:04 crc kubenswrapper[4685]: I0128 12:51:04.820948 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-2"] Jan 28 12:51:04 crc kubenswrapper[4685]: I0128 12:51:04.829466 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:51:05 crc kubenswrapper[4685]: I0128 12:51:05.586131 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-2" podUID="f3edf321-8f65-446f-876c-30420115c2a2" containerName="glance-log" containerID="cri-o://dceedf8fe963926559d668e21f944b847fdf72ca1781a4acf730e7709e791776" gracePeriod=30 Jan 28 12:51:05 crc kubenswrapper[4685]: I0128 12:51:05.586639 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-2" podUID="f3edf321-8f65-446f-876c-30420115c2a2" containerName="glance-httpd" containerID="cri-o://0a16cd863c76b6716670c0cf89d76f1fc57e3f42c869f206cbd8a84151d98ca9" gracePeriod=30 Jan 28 12:51:05 crc kubenswrapper[4685]: I0128 12:51:05.587212 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-2" podUID="728779c7-10ec-437a-a7c5-1ef8b27fa410" containerName="glance-log" containerID="cri-o://fcf03e9a05b79483f749da0cb0bf4acfd26da8d0b766503190a4a1c352a4b2f1" gracePeriod=30 Jan 28 12:51:05 crc 
kubenswrapper[4685]: I0128 12:51:05.587461 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-1" podUID="180dae19-d035-4741-b0e2-9eeff4224e07" containerName="glance-log" containerID="cri-o://d780e3f653d82aa184f0f1d229ea4bdf5663c481bb2e01dc9dda590a13888631" gracePeriod=30 Jan 28 12:51:05 crc kubenswrapper[4685]: I0128 12:51:05.587603 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-2" podUID="728779c7-10ec-437a-a7c5-1ef8b27fa410" containerName="glance-httpd" containerID="cri-o://56d7751998c58327013b7706a8a7b726f85f870358a2a4ffd98d23dc6e1698b1" gracePeriod=30 Jan 28 12:51:05 crc kubenswrapper[4685]: I0128 12:51:05.587603 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-1" podUID="180dae19-d035-4741-b0e2-9eeff4224e07" containerName="glance-httpd" containerID="cri-o://960f866769de4e206a65af40c58c86c14eacd0977a5990b718f8b812eb19d84e" gracePeriod=30 Jan 28 12:51:05 crc kubenswrapper[4685]: I0128 12:51:05.596708 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="glance-kuttl-tests/glance-default-internal-api-2" podUID="728779c7-10ec-437a-a7c5-1ef8b27fa410" containerName="glance-log" probeResult="failure" output="Get \"http://10.217.0.129:9292/healthcheck\": EOF" Jan 28 12:51:05 crc kubenswrapper[4685]: I0128 12:51:05.597056 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="glance-kuttl-tests/glance-default-internal-api-2" podUID="728779c7-10ec-437a-a7c5-1ef8b27fa410" containerName="glance-httpd" probeResult="failure" output="Get \"http://10.217.0.129:9292/healthcheck\": EOF" Jan 28 12:51:05 crc kubenswrapper[4685]: I0128 12:51:05.601427 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="glance-kuttl-tests/glance-default-external-api-1" podUID="180dae19-d035-4741-b0e2-9eeff4224e07" containerName="glance-log" probeResult="failure" output="Get \"http://10.217.0.127:9292/healthcheck\": EOF" Jan 28 12:51:05 crc kubenswrapper[4685]: I0128 12:51:05.601427 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="glance-kuttl-tests/glance-default-external-api-1" podUID="180dae19-d035-4741-b0e2-9eeff4224e07" containerName="glance-httpd" probeResult="failure" output="Get \"http://10.217.0.127:9292/healthcheck\": EOF" Jan 28 12:51:05 crc kubenswrapper[4685]: I0128 12:51:05.604302 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="glance-kuttl-tests/glance-default-external-api-2" podUID="f3edf321-8f65-446f-876c-30420115c2a2" containerName="glance-log" probeResult="failure" output="Get \"http://10.217.0.128:9292/healthcheck\": EOF" Jan 28 12:51:05 crc kubenswrapper[4685]: I0128 12:51:05.604543 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="glance-kuttl-tests/glance-default-external-api-2" podUID="f3edf321-8f65-446f-876c-30420115c2a2" containerName="glance-httpd" probeResult="failure" output="Get \"http://10.217.0.128:9292/healthcheck\": EOF" Jan 28 12:51:06 crc kubenswrapper[4685]: I0128 12:51:06.596470 4685 generic.go:334] "Generic (PLEG): container finished" podID="728779c7-10ec-437a-a7c5-1ef8b27fa410" containerID="fcf03e9a05b79483f749da0cb0bf4acfd26da8d0b766503190a4a1c352a4b2f1" exitCode=143 Jan 28 12:51:06 crc kubenswrapper[4685]: I0128 12:51:06.596815 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-2" 
event={"ID":"728779c7-10ec-437a-a7c5-1ef8b27fa410","Type":"ContainerDied","Data":"fcf03e9a05b79483f749da0cb0bf4acfd26da8d0b766503190a4a1c352a4b2f1"} Jan 28 12:51:06 crc kubenswrapper[4685]: I0128 12:51:06.601879 4685 generic.go:334] "Generic (PLEG): container finished" podID="180dae19-d035-4741-b0e2-9eeff4224e07" containerID="d780e3f653d82aa184f0f1d229ea4bdf5663c481bb2e01dc9dda590a13888631" exitCode=143 Jan 28 12:51:06 crc kubenswrapper[4685]: I0128 12:51:06.601978 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-1" event={"ID":"180dae19-d035-4741-b0e2-9eeff4224e07","Type":"ContainerDied","Data":"d780e3f653d82aa184f0f1d229ea4bdf5663c481bb2e01dc9dda590a13888631"} Jan 28 12:51:06 crc kubenswrapper[4685]: I0128 12:51:06.604547 4685 generic.go:334] "Generic (PLEG): container finished" podID="f3edf321-8f65-446f-876c-30420115c2a2" containerID="dceedf8fe963926559d668e21f944b847fdf72ca1781a4acf730e7709e791776" exitCode=143 Jan 28 12:51:06 crc kubenswrapper[4685]: I0128 12:51:06.604596 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-2" event={"ID":"f3edf321-8f65-446f-876c-30420115c2a2","Type":"ContainerDied","Data":"dceedf8fe963926559d668e21f944b847fdf72ca1781a4acf730e7709e791776"} Jan 28 12:51:06 crc kubenswrapper[4685]: I0128 12:51:06.604871 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-1" podUID="a83d5fc7-63c1-40f5-827c-54ee37462203" containerName="glance-log" containerID="cri-o://a96cbe9d8a58e4ea92017d1e508c6421ac7778d67d74358efa5cf8439e13f03c" gracePeriod=30 Jan 28 12:51:06 crc kubenswrapper[4685]: I0128 12:51:06.605033 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-1" podUID="a83d5fc7-63c1-40f5-827c-54ee37462203" containerName="glance-httpd" containerID="cri-o://ae10b8c59713ae699f0fc9b1120564d9a414f922198f92b69a4df4b75e02e4ae" gracePeriod=30 Jan 28 12:51:07 crc kubenswrapper[4685]: I0128 12:51:07.619158 4685 generic.go:334] "Generic (PLEG): container finished" podID="a83d5fc7-63c1-40f5-827c-54ee37462203" containerID="a96cbe9d8a58e4ea92017d1e508c6421ac7778d67d74358efa5cf8439e13f03c" exitCode=143 Jan 28 12:51:07 crc kubenswrapper[4685]: I0128 12:51:07.619209 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"a83d5fc7-63c1-40f5-827c-54ee37462203","Type":"ContainerDied","Data":"a96cbe9d8a58e4ea92017d1e508c6421ac7778d67d74358efa5cf8439e13f03c"} Jan 28 12:51:09 crc kubenswrapper[4685]: I0128 12:51:09.996989 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.075632 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a83d5fc7-63c1-40f5-827c-54ee37462203-scripts\") pod \"a83d5fc7-63c1-40f5-827c-54ee37462203\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.075691 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a83d5fc7-63c1-40f5-827c-54ee37462203-httpd-run\") pod \"a83d5fc7-63c1-40f5-827c-54ee37462203\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.075803 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-dev\") pod \"a83d5fc7-63c1-40f5-827c-54ee37462203\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.075853 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"a83d5fc7-63c1-40f5-827c-54ee37462203\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.075889 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a83d5fc7-63c1-40f5-827c-54ee37462203-config-data\") pod \"a83d5fc7-63c1-40f5-827c-54ee37462203\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.075919 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-sys\") pod \"a83d5fc7-63c1-40f5-827c-54ee37462203\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.075967 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-var-locks-brick\") pod \"a83d5fc7-63c1-40f5-827c-54ee37462203\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.076003 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-lib-modules\") pod \"a83d5fc7-63c1-40f5-827c-54ee37462203\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.076075 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"a83d5fc7-63c1-40f5-827c-54ee37462203\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.076107 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxq7g\" (UniqueName: \"kubernetes.io/projected/a83d5fc7-63c1-40f5-827c-54ee37462203-kube-api-access-bxq7g\") pod \"a83d5fc7-63c1-40f5-827c-54ee37462203\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.076150 4685 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-etc-nvme\") pod \"a83d5fc7-63c1-40f5-827c-54ee37462203\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.076189 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a83d5fc7-63c1-40f5-827c-54ee37462203-logs\") pod \"a83d5fc7-63c1-40f5-827c-54ee37462203\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.076208 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-run\") pod \"a83d5fc7-63c1-40f5-827c-54ee37462203\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.076239 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-etc-iscsi\") pod \"a83d5fc7-63c1-40f5-827c-54ee37462203\" (UID: \"a83d5fc7-63c1-40f5-827c-54ee37462203\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.076580 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "a83d5fc7-63c1-40f5-827c-54ee37462203" (UID: "a83d5fc7-63c1-40f5-827c-54ee37462203"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.076827 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a83d5fc7-63c1-40f5-827c-54ee37462203-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a83d5fc7-63c1-40f5-827c-54ee37462203" (UID: "a83d5fc7-63c1-40f5-827c-54ee37462203"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.076839 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "a83d5fc7-63c1-40f5-827c-54ee37462203" (UID: "a83d5fc7-63c1-40f5-827c-54ee37462203"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.076910 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-dev" (OuterVolumeSpecName: "dev") pod "a83d5fc7-63c1-40f5-827c-54ee37462203" (UID: "a83d5fc7-63c1-40f5-827c-54ee37462203"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.081438 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "a83d5fc7-63c1-40f5-827c-54ee37462203" (UID: "a83d5fc7-63c1-40f5-827c-54ee37462203"). InnerVolumeSpecName "etc-nvme". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.081493 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-run" (OuterVolumeSpecName: "run") pod "a83d5fc7-63c1-40f5-827c-54ee37462203" (UID: "a83d5fc7-63c1-40f5-827c-54ee37462203"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.081519 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-sys" (OuterVolumeSpecName: "sys") pod "a83d5fc7-63c1-40f5-827c-54ee37462203" (UID: "a83d5fc7-63c1-40f5-827c-54ee37462203"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.081546 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "a83d5fc7-63c1-40f5-827c-54ee37462203" (UID: "a83d5fc7-63c1-40f5-827c-54ee37462203"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.081678 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a83d5fc7-63c1-40f5-827c-54ee37462203-logs" (OuterVolumeSpecName: "logs") pod "a83d5fc7-63c1-40f5-827c-54ee37462203" (UID: "a83d5fc7-63c1-40f5-827c-54ee37462203"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.081803 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "a83d5fc7-63c1-40f5-827c-54ee37462203" (UID: "a83d5fc7-63c1-40f5-827c-54ee37462203"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.081899 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance-cache") pod "a83d5fc7-63c1-40f5-827c-54ee37462203" (UID: "a83d5fc7-63c1-40f5-827c-54ee37462203"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.085323 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a83d5fc7-63c1-40f5-827c-54ee37462203-scripts" (OuterVolumeSpecName: "scripts") pod "a83d5fc7-63c1-40f5-827c-54ee37462203" (UID: "a83d5fc7-63c1-40f5-827c-54ee37462203"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.088387 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a83d5fc7-63c1-40f5-827c-54ee37462203-kube-api-access-bxq7g" (OuterVolumeSpecName: "kube-api-access-bxq7g") pod "a83d5fc7-63c1-40f5-827c-54ee37462203" (UID: "a83d5fc7-63c1-40f5-827c-54ee37462203"). InnerVolumeSpecName "kube-api-access-bxq7g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.134328 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a83d5fc7-63c1-40f5-827c-54ee37462203-config-data" (OuterVolumeSpecName: "config-data") pod "a83d5fc7-63c1-40f5-827c-54ee37462203" (UID: "a83d5fc7-63c1-40f5-827c-54ee37462203"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.178133 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.178209 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxq7g\" (UniqueName: \"kubernetes.io/projected/a83d5fc7-63c1-40f5-827c-54ee37462203-kube-api-access-bxq7g\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.178252 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.178266 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a83d5fc7-63c1-40f5-827c-54ee37462203-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.178278 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.178293 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.178328 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a83d5fc7-63c1-40f5-827c-54ee37462203-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.178343 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a83d5fc7-63c1-40f5-827c-54ee37462203-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.178357 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.178377 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.178415 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a83d5fc7-63c1-40f5-827c-54ee37462203-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.178429 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 
crc kubenswrapper[4685]: I0128 12:51:10.178441 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.178453 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/a83d5fc7-63c1-40f5-827c-54ee37462203-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.192859 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.200589 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.282071 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.282119 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.403419 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.464739 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.484448 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-etc-iscsi\") pod \"180dae19-d035-4741-b0e2-9eeff4224e07\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.484497 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/180dae19-d035-4741-b0e2-9eeff4224e07-config-data\") pod \"180dae19-d035-4741-b0e2-9eeff4224e07\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.484520 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-dev\") pod \"180dae19-d035-4741-b0e2-9eeff4224e07\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.484539 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/180dae19-d035-4741-b0e2-9eeff4224e07-scripts\") pod \"180dae19-d035-4741-b0e2-9eeff4224e07\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.484601 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"180dae19-d035-4741-b0e2-9eeff4224e07\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.484633 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-lib-modules\") pod \"180dae19-d035-4741-b0e2-9eeff4224e07\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.484677 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/180dae19-d035-4741-b0e2-9eeff4224e07-logs\") pod \"180dae19-d035-4741-b0e2-9eeff4224e07\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.484735 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-sys\") pod \"180dae19-d035-4741-b0e2-9eeff4224e07\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.484753 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rklxs\" (UniqueName: \"kubernetes.io/projected/180dae19-d035-4741-b0e2-9eeff4224e07-kube-api-access-rklxs\") pod \"180dae19-d035-4741-b0e2-9eeff4224e07\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.484768 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-run\") pod \"180dae19-d035-4741-b0e2-9eeff4224e07\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.484766 4685 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-dev" (OuterVolumeSpecName: "dev") pod "180dae19-d035-4741-b0e2-9eeff4224e07" (UID: "180dae19-d035-4741-b0e2-9eeff4224e07"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.484835 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "180dae19-d035-4741-b0e2-9eeff4224e07" (UID: "180dae19-d035-4741-b0e2-9eeff4224e07"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.484839 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/180dae19-d035-4741-b0e2-9eeff4224e07-httpd-run\") pod \"180dae19-d035-4741-b0e2-9eeff4224e07\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.484899 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"180dae19-d035-4741-b0e2-9eeff4224e07\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.484931 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-etc-nvme\") pod \"180dae19-d035-4741-b0e2-9eeff4224e07\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.484965 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-var-locks-brick\") pod \"180dae19-d035-4741-b0e2-9eeff4224e07\" (UID: \"180dae19-d035-4741-b0e2-9eeff4224e07\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.485056 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/180dae19-d035-4741-b0e2-9eeff4224e07-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "180dae19-d035-4741-b0e2-9eeff4224e07" (UID: "180dae19-d035-4741-b0e2-9eeff4224e07"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.485091 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "180dae19-d035-4741-b0e2-9eeff4224e07" (UID: "180dae19-d035-4741-b0e2-9eeff4224e07"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.485333 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/180dae19-d035-4741-b0e2-9eeff4224e07-logs" (OuterVolumeSpecName: "logs") pod "180dae19-d035-4741-b0e2-9eeff4224e07" (UID: "180dae19-d035-4741-b0e2-9eeff4224e07"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.485360 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-sys" (OuterVolumeSpecName: "sys") pod "180dae19-d035-4741-b0e2-9eeff4224e07" (UID: "180dae19-d035-4741-b0e2-9eeff4224e07"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.485425 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "180dae19-d035-4741-b0e2-9eeff4224e07" (UID: "180dae19-d035-4741-b0e2-9eeff4224e07"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.485466 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "180dae19-d035-4741-b0e2-9eeff4224e07" (UID: "180dae19-d035-4741-b0e2-9eeff4224e07"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.485489 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-run" (OuterVolumeSpecName: "run") pod "180dae19-d035-4741-b0e2-9eeff4224e07" (UID: "180dae19-d035-4741-b0e2-9eeff4224e07"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.485820 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.485839 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/180dae19-d035-4741-b0e2-9eeff4224e07-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.485873 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.485881 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.485889 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/180dae19-d035-4741-b0e2-9eeff4224e07-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.485897 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.485905 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc 
kubenswrapper[4685]: I0128 12:51:10.485914 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.485923 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/180dae19-d035-4741-b0e2-9eeff4224e07-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.489213 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage17-crc" (OuterVolumeSpecName: "glance-cache") pod "180dae19-d035-4741-b0e2-9eeff4224e07" (UID: "180dae19-d035-4741-b0e2-9eeff4224e07"). InnerVolumeSpecName "local-storage17-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.489343 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/180dae19-d035-4741-b0e2-9eeff4224e07-scripts" (OuterVolumeSpecName: "scripts") pod "180dae19-d035-4741-b0e2-9eeff4224e07" (UID: "180dae19-d035-4741-b0e2-9eeff4224e07"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.497235 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage20-crc" (OuterVolumeSpecName: "glance") pod "180dae19-d035-4741-b0e2-9eeff4224e07" (UID: "180dae19-d035-4741-b0e2-9eeff4224e07"). InnerVolumeSpecName "local-storage20-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.497280 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/180dae19-d035-4741-b0e2-9eeff4224e07-kube-api-access-rklxs" (OuterVolumeSpecName: "kube-api-access-rklxs") pod "180dae19-d035-4741-b0e2-9eeff4224e07" (UID: "180dae19-d035-4741-b0e2-9eeff4224e07"). InnerVolumeSpecName "kube-api-access-rklxs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.508727 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.551948 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/180dae19-d035-4741-b0e2-9eeff4224e07-config-data" (OuterVolumeSpecName: "config-data") pod "180dae19-d035-4741-b0e2-9eeff4224e07" (UID: "180dae19-d035-4741-b0e2-9eeff4224e07"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.587845 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-etc-iscsi\") pod \"f3edf321-8f65-446f-876c-30420115c2a2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.588460 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-run\") pod \"f3edf321-8f65-446f-876c-30420115c2a2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.588604 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-etc-nvme\") pod \"728779c7-10ec-437a-a7c5-1ef8b27fa410\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.588765 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5k848\" (UniqueName: \"kubernetes.io/projected/f3edf321-8f65-446f-876c-30420115c2a2-kube-api-access-5k848\") pod \"f3edf321-8f65-446f-876c-30420115c2a2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.588812 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-var-locks-brick\") pod \"728779c7-10ec-437a-a7c5-1ef8b27fa410\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.588858 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-var-locks-brick\") pod \"f3edf321-8f65-446f-876c-30420115c2a2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.588895 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-lib-modules\") pod \"f3edf321-8f65-446f-876c-30420115c2a2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.588937 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"728779c7-10ec-437a-a7c5-1ef8b27fa410\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.588967 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3edf321-8f65-446f-876c-30420115c2a2-logs\") pod \"f3edf321-8f65-446f-876c-30420115c2a2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.588991 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"728779c7-10ec-437a-a7c5-1ef8b27fa410\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589020 4685 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-dev\") pod \"728779c7-10ec-437a-a7c5-1ef8b27fa410\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589047 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-etc-iscsi\") pod \"728779c7-10ec-437a-a7c5-1ef8b27fa410\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589069 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/728779c7-10ec-437a-a7c5-1ef8b27fa410-httpd-run\") pod \"728779c7-10ec-437a-a7c5-1ef8b27fa410\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589109 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f3edf321-8f65-446f-876c-30420115c2a2-httpd-run\") pod \"f3edf321-8f65-446f-876c-30420115c2a2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589130 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-etc-nvme\") pod \"f3edf321-8f65-446f-876c-30420115c2a2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589153 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-dev\") pod \"f3edf321-8f65-446f-876c-30420115c2a2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589191 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/728779c7-10ec-437a-a7c5-1ef8b27fa410-scripts\") pod \"728779c7-10ec-437a-a7c5-1ef8b27fa410\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589211 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"f3edf321-8f65-446f-876c-30420115c2a2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589231 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3edf321-8f65-446f-876c-30420115c2a2-scripts\") pod \"f3edf321-8f65-446f-876c-30420115c2a2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589258 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/728779c7-10ec-437a-a7c5-1ef8b27fa410-config-data\") pod \"728779c7-10ec-437a-a7c5-1ef8b27fa410\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589280 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: 
\"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-lib-modules\") pod \"728779c7-10ec-437a-a7c5-1ef8b27fa410\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589308 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-sys\") pod \"728779c7-10ec-437a-a7c5-1ef8b27fa410\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589334 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3edf321-8f65-446f-876c-30420115c2a2-config-data\") pod \"f3edf321-8f65-446f-876c-30420115c2a2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589357 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"f3edf321-8f65-446f-876c-30420115c2a2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589409 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-sys\") pod \"f3edf321-8f65-446f-876c-30420115c2a2\" (UID: \"f3edf321-8f65-446f-876c-30420115c2a2\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589427 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-run\") pod \"728779c7-10ec-437a-a7c5-1ef8b27fa410\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589450 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/728779c7-10ec-437a-a7c5-1ef8b27fa410-logs\") pod \"728779c7-10ec-437a-a7c5-1ef8b27fa410\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589477 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rgdxx\" (UniqueName: \"kubernetes.io/projected/728779c7-10ec-437a-a7c5-1ef8b27fa410-kube-api-access-rgdxx\") pod \"728779c7-10ec-437a-a7c5-1ef8b27fa410\" (UID: \"728779c7-10ec-437a-a7c5-1ef8b27fa410\") " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.588370 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "f3edf321-8f65-446f-876c-30420115c2a2" (UID: "f3edf321-8f65-446f-876c-30420115c2a2"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589820 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "728779c7-10ec-437a-a7c5-1ef8b27fa410" (UID: "728779c7-10ec-437a-a7c5-1ef8b27fa410"). InnerVolumeSpecName "lib-modules". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589850 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-dev" (OuterVolumeSpecName: "dev") pod "f3edf321-8f65-446f-876c-30420115c2a2" (UID: "f3edf321-8f65-446f-876c-30420115c2a2"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589945 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-dev" (OuterVolumeSpecName: "dev") pod "728779c7-10ec-437a-a7c5-1ef8b27fa410" (UID: "728779c7-10ec-437a-a7c5-1ef8b27fa410"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.590013 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "728779c7-10ec-437a-a7c5-1ef8b27fa410" (UID: "728779c7-10ec-437a-a7c5-1ef8b27fa410"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.588543 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-run" (OuterVolumeSpecName: "run") pod "f3edf321-8f65-446f-876c-30420115c2a2" (UID: "f3edf321-8f65-446f-876c-30420115c2a2"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.588746 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "728779c7-10ec-437a-a7c5-1ef8b27fa410" (UID: "728779c7-10ec-437a-a7c5-1ef8b27fa410"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.589661 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "f3edf321-8f65-446f-876c-30420115c2a2" (UID: "f3edf321-8f65-446f-876c-30420115c2a2"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.590479 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-sys" (OuterVolumeSpecName: "sys") pod "728779c7-10ec-437a-a7c5-1ef8b27fa410" (UID: "728779c7-10ec-437a-a7c5-1ef8b27fa410"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.590530 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-run" (OuterVolumeSpecName: "run") pod "728779c7-10ec-437a-a7c5-1ef8b27fa410" (UID: "728779c7-10ec-437a-a7c5-1ef8b27fa410"). InnerVolumeSpecName "run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.590548 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-sys" (OuterVolumeSpecName: "sys") pod "f3edf321-8f65-446f-876c-30420115c2a2" (UID: "f3edf321-8f65-446f-876c-30420115c2a2"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.590638 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3edf321-8f65-446f-876c-30420115c2a2-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "f3edf321-8f65-446f-876c-30420115c2a2" (UID: "f3edf321-8f65-446f-876c-30420115c2a2"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.590827 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "f3edf321-8f65-446f-876c-30420115c2a2" (UID: "f3edf321-8f65-446f-876c-30420115c2a2"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.590851 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "728779c7-10ec-437a-a7c5-1ef8b27fa410" (UID: "728779c7-10ec-437a-a7c5-1ef8b27fa410"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.590874 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "f3edf321-8f65-446f-876c-30420115c2a2" (UID: "f3edf321-8f65-446f-876c-30420115c2a2"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.591550 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/728779c7-10ec-437a-a7c5-1ef8b27fa410-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "728779c7-10ec-437a-a7c5-1ef8b27fa410" (UID: "728779c7-10ec-437a-a7c5-1ef8b27fa410"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.591834 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/180dae19-d035-4741-b0e2-9eeff4224e07-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.591988 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") on node \"crc\" " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.592279 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.592385 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rklxs\" (UniqueName: \"kubernetes.io/projected/180dae19-d035-4741-b0e2-9eeff4224e07-kube-api-access-rklxs\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.592622 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") on node \"crc\" " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.593334 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.593442 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/180dae19-d035-4741-b0e2-9eeff4224e07-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.592876 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/728779c7-10ec-437a-a7c5-1ef8b27fa410-logs" (OuterVolumeSpecName: "logs") pod "728779c7-10ec-437a-a7c5-1ef8b27fa410" (UID: "728779c7-10ec-437a-a7c5-1ef8b27fa410"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.592966 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "728779c7-10ec-437a-a7c5-1ef8b27fa410" (UID: "728779c7-10ec-437a-a7c5-1ef8b27fa410"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.593069 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3edf321-8f65-446f-876c-30420115c2a2-logs" (OuterVolumeSpecName: "logs") pod "f3edf321-8f65-446f-876c-30420115c2a2" (UID: "f3edf321-8f65-446f-876c-30420115c2a2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.594571 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance-cache") pod "f3edf321-8f65-446f-876c-30420115c2a2" (UID: "f3edf321-8f65-446f-876c-30420115c2a2"). InnerVolumeSpecName "local-storage11-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.595132 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance-cache") pod "728779c7-10ec-437a-a7c5-1ef8b27fa410" (UID: "728779c7-10ec-437a-a7c5-1ef8b27fa410"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.597188 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "f3edf321-8f65-446f-876c-30420115c2a2" (UID: "f3edf321-8f65-446f-876c-30420115c2a2"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.604987 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3edf321-8f65-446f-876c-30420115c2a2-kube-api-access-5k848" (OuterVolumeSpecName: "kube-api-access-5k848") pod "f3edf321-8f65-446f-876c-30420115c2a2" (UID: "f3edf321-8f65-446f-876c-30420115c2a2"). InnerVolumeSpecName "kube-api-access-5k848". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.613595 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3edf321-8f65-446f-876c-30420115c2a2-scripts" (OuterVolumeSpecName: "scripts") pod "f3edf321-8f65-446f-876c-30420115c2a2" (UID: "f3edf321-8f65-446f-876c-30420115c2a2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.616222 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/728779c7-10ec-437a-a7c5-1ef8b27fa410-scripts" (OuterVolumeSpecName: "scripts") pod "728779c7-10ec-437a-a7c5-1ef8b27fa410" (UID: "728779c7-10ec-437a-a7c5-1ef8b27fa410"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.616992 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage17-crc" (UniqueName: "kubernetes.io/local-volume/local-storage17-crc") on node "crc" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.618183 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/728779c7-10ec-437a-a7c5-1ef8b27fa410-kube-api-access-rgdxx" (OuterVolumeSpecName: "kube-api-access-rgdxx") pod "728779c7-10ec-437a-a7c5-1ef8b27fa410" (UID: "728779c7-10ec-437a-a7c5-1ef8b27fa410"). InnerVolumeSpecName "kube-api-access-rgdxx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.623903 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage20-crc" (UniqueName: "kubernetes.io/local-volume/local-storage20-crc") on node "crc" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.636715 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3edf321-8f65-446f-876c-30420115c2a2-config-data" (OuterVolumeSpecName: "config-data") pod "f3edf321-8f65-446f-876c-30420115c2a2" (UID: "f3edf321-8f65-446f-876c-30420115c2a2"). InnerVolumeSpecName "config-data". 
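
The records above are the kubelet's volume teardown pipeline for the deleted glance pods, and they run in a fixed order: UnmountVolume.TearDown removes each per-pod mount (OuterVolumeSpecName is the name the pod spec uses, e.g. "glance-cache"; InnerVolumeSpecName is the backing volume, which for the PVC-backed mounts is the PV name such as "local-storage11-crc"); for the local-volume PVs, operationExecutor.UnmountDevice then releases the node-level mount; and the reconciler finally records "Volume detached ... DevicePath \"\"" once a volume is fully gone. A minimal Go sketch for summarizing this pipeline from such a log — the kubelet.log path and the regular expression are assumptions for illustration, not kubelet code:

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
    )

    // Tally UnmountVolume.TearDown records per pod UID from a kubelet log.
    // The expression only needs to match lines shaped like the records above.
    var tearDown = regexp.MustCompile(
        `UnmountVolume\.TearDown succeeded for volume "[^"]+" ` +
            `\(OuterVolumeSpecName: "([^"]+)"\) pod "[^"]+" \(UID: "([^"]+)"\)`)

    func main() {
        f, err := os.Open("kubelet.log") // assumed path
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        defer f.Close()

        perPod := map[string][]string{}
        sc := bufio.NewScanner(f)
        sc.Buffer(make([]byte, 0, 64*1024), 1024*1024) // kubelet lines run long
        for sc.Scan() {
            if m := tearDown.FindStringSubmatch(sc.Text()); m != nil {
                perPod[m[2]] = append(perPod[m[2]], m[1]) // UID -> OuterVolumeSpecNames
            }
        }
        for uid, vols := range perPod {
            fmt.Printf("pod %s: %d mounts torn down: %v\n", uid, len(vols), vols)
        }
    }

On this excerpt it would report, for pod UID f3edf321-8f65-446f-876c-30420115c2a2, the sys, httpd-run, lib-modules, var-locks-brick and logs mounts, the scripts/config-data secrets, the kube-api-access projection and the two local-storage PVs, and the corresponding sets for the other glance pod UIDs.
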
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.646070 4685 generic.go:334] "Generic (PLEG): container finished" podID="728779c7-10ec-437a-a7c5-1ef8b27fa410" containerID="56d7751998c58327013b7706a8a7b726f85f870358a2a4ffd98d23dc6e1698b1" exitCode=0 Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.646150 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-2" event={"ID":"728779c7-10ec-437a-a7c5-1ef8b27fa410","Type":"ContainerDied","Data":"56d7751998c58327013b7706a8a7b726f85f870358a2a4ffd98d23dc6e1698b1"} Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.646198 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-2" event={"ID":"728779c7-10ec-437a-a7c5-1ef8b27fa410","Type":"ContainerDied","Data":"b95efa6ec10cee60894d03052e2b3c632b1f43082a7acc3d7f78477325734a17"} Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.646219 4685 scope.go:117] "RemoveContainer" containerID="56d7751998c58327013b7706a8a7b726f85f870358a2a4ffd98d23dc6e1698b1" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.646427 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-2" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.650637 4685 generic.go:334] "Generic (PLEG): container finished" podID="a83d5fc7-63c1-40f5-827c-54ee37462203" containerID="ae10b8c59713ae699f0fc9b1120564d9a414f922198f92b69a4df4b75e02e4ae" exitCode=0 Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.650699 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"a83d5fc7-63c1-40f5-827c-54ee37462203","Type":"ContainerDied","Data":"ae10b8c59713ae699f0fc9b1120564d9a414f922198f92b69a4df4b75e02e4ae"} Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.650733 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"a83d5fc7-63c1-40f5-827c-54ee37462203","Type":"ContainerDied","Data":"08579835f2307cd16f918f6cb9d719ebff25c7571a8e6224114cf4f67e0c01d5"} Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.650823 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.654900 4685 generic.go:334] "Generic (PLEG): container finished" podID="180dae19-d035-4741-b0e2-9eeff4224e07" containerID="960f866769de4e206a65af40c58c86c14eacd0977a5990b718f8b812eb19d84e" exitCode=0 Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.654960 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.654966 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-1" event={"ID":"180dae19-d035-4741-b0e2-9eeff4224e07","Type":"ContainerDied","Data":"960f866769de4e206a65af40c58c86c14eacd0977a5990b718f8b812eb19d84e"} Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.654990 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-1" event={"ID":"180dae19-d035-4741-b0e2-9eeff4224e07","Type":"ContainerDied","Data":"b03fa9660f80bea478a9752b3b3f8e5ad38fd50511f3edfeaba000c9d4b1cfc6"} Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.655656 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/728779c7-10ec-437a-a7c5-1ef8b27fa410-config-data" (OuterVolumeSpecName: "config-data") pod "728779c7-10ec-437a-a7c5-1ef8b27fa410" (UID: "728779c7-10ec-437a-a7c5-1ef8b27fa410"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.663395 4685 generic.go:334] "Generic (PLEG): container finished" podID="f3edf321-8f65-446f-876c-30420115c2a2" containerID="0a16cd863c76b6716670c0cf89d76f1fc57e3f42c869f206cbd8a84151d98ca9" exitCode=0 Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.663455 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-2" event={"ID":"f3edf321-8f65-446f-876c-30420115c2a2","Type":"ContainerDied","Data":"0a16cd863c76b6716670c0cf89d76f1fc57e3f42c869f206cbd8a84151d98ca9"} Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.663481 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-2" event={"ID":"f3edf321-8f65-446f-876c-30420115c2a2","Type":"ContainerDied","Data":"124e2b65c31244fba8323e160a94506940c04e3cb591c30396b4534278ce37c9"} Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.663570 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-2" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.676924 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.687392 4685 scope.go:117] "RemoveContainer" containerID="fcf03e9a05b79483f749da0cb0bf4acfd26da8d0b766503190a4a1c352a4b2f1" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.688013 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699122 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699156 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/728779c7-10ec-437a-a7c5-1ef8b27fa410-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699185 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699197 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699210 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rgdxx\" (UniqueName: \"kubernetes.io/projected/728779c7-10ec-437a-a7c5-1ef8b27fa410-kube-api-access-rgdxx\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699222 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699232 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699244 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5k848\" (UniqueName: \"kubernetes.io/projected/f3edf321-8f65-446f-876c-30420115c2a2-kube-api-access-5k848\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699254 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699265 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699276 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699301 4685 reconciler_common.go:286] 
"operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699314 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3edf321-8f65-446f-876c-30420115c2a2-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699331 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699342 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699353 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699364 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/728779c7-10ec-437a-a7c5-1ef8b27fa410-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699374 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f3edf321-8f65-446f-876c-30420115c2a2-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699384 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699394 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699404 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/728779c7-10ec-437a-a7c5-1ef8b27fa410-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699449 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f3edf321-8f65-446f-876c-30420115c2a2-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699496 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699511 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3edf321-8f65-446f-876c-30420115c2a2-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699522 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/728779c7-10ec-437a-a7c5-1ef8b27fa410-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699533 4685 reconciler_common.go:293] "Volume detached for 
volume \"sys\" (UniqueName: \"kubernetes.io/host-path/728779c7-10ec-437a-a7c5-1ef8b27fa410-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699543 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3edf321-8f65-446f-876c-30420115c2a2-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.699559 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.701126 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-1"] Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.707031 4685 scope.go:117] "RemoveContainer" containerID="56d7751998c58327013b7706a8a7b726f85f870358a2a4ffd98d23dc6e1698b1" Jan 28 12:51:10 crc kubenswrapper[4685]: E0128 12:51:10.708067 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56d7751998c58327013b7706a8a7b726f85f870358a2a4ffd98d23dc6e1698b1\": container with ID starting with 56d7751998c58327013b7706a8a7b726f85f870358a2a4ffd98d23dc6e1698b1 not found: ID does not exist" containerID="56d7751998c58327013b7706a8a7b726f85f870358a2a4ffd98d23dc6e1698b1" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.708137 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56d7751998c58327013b7706a8a7b726f85f870358a2a4ffd98d23dc6e1698b1"} err="failed to get container status \"56d7751998c58327013b7706a8a7b726f85f870358a2a4ffd98d23dc6e1698b1\": rpc error: code = NotFound desc = could not find container \"56d7751998c58327013b7706a8a7b726f85f870358a2a4ffd98d23dc6e1698b1\": container with ID starting with 56d7751998c58327013b7706a8a7b726f85f870358a2a4ffd98d23dc6e1698b1 not found: ID does not exist" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.708180 4685 scope.go:117] "RemoveContainer" containerID="fcf03e9a05b79483f749da0cb0bf4acfd26da8d0b766503190a4a1c352a4b2f1" Jan 28 12:51:10 crc kubenswrapper[4685]: E0128 12:51:10.709975 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fcf03e9a05b79483f749da0cb0bf4acfd26da8d0b766503190a4a1c352a4b2f1\": container with ID starting with fcf03e9a05b79483f749da0cb0bf4acfd26da8d0b766503190a4a1c352a4b2f1 not found: ID does not exist" containerID="fcf03e9a05b79483f749da0cb0bf4acfd26da8d0b766503190a4a1c352a4b2f1" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.710013 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fcf03e9a05b79483f749da0cb0bf4acfd26da8d0b766503190a4a1c352a4b2f1"} err="failed to get container status \"fcf03e9a05b79483f749da0cb0bf4acfd26da8d0b766503190a4a1c352a4b2f1\": rpc error: code = NotFound desc = could not find container \"fcf03e9a05b79483f749da0cb0bf4acfd26da8d0b766503190a4a1c352a4b2f1\": container with ID starting with fcf03e9a05b79483f749da0cb0bf4acfd26da8d0b766503190a4a1c352a4b2f1 not found: ID does not exist" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.710055 4685 scope.go:117] "RemoveContainer" containerID="ae10b8c59713ae699f0fc9b1120564d9a414f922198f92b69a4df4b75e02e4ae" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.712826 4685 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-1"] Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.719337 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.720114 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.728908 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.729638 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-2"] Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.730465 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.737986 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-2"] Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.746160 4685 scope.go:117] "RemoveContainer" containerID="a96cbe9d8a58e4ea92017d1e508c6421ac7778d67d74358efa5cf8439e13f03c" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.801774 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.801814 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.801825 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.801836 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.824089 4685 scope.go:117] "RemoveContainer" containerID="ae10b8c59713ae699f0fc9b1120564d9a414f922198f92b69a4df4b75e02e4ae" Jan 28 12:51:10 crc kubenswrapper[4685]: E0128 12:51:10.826278 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae10b8c59713ae699f0fc9b1120564d9a414f922198f92b69a4df4b75e02e4ae\": container with ID starting with ae10b8c59713ae699f0fc9b1120564d9a414f922198f92b69a4df4b75e02e4ae not found: ID does not exist" containerID="ae10b8c59713ae699f0fc9b1120564d9a414f922198f92b69a4df4b75e02e4ae" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.826320 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae10b8c59713ae699f0fc9b1120564d9a414f922198f92b69a4df4b75e02e4ae"} err="failed to get container status \"ae10b8c59713ae699f0fc9b1120564d9a414f922198f92b69a4df4b75e02e4ae\": rpc error: code = NotFound desc = could not find container 
\"ae10b8c59713ae699f0fc9b1120564d9a414f922198f92b69a4df4b75e02e4ae\": container with ID starting with ae10b8c59713ae699f0fc9b1120564d9a414f922198f92b69a4df4b75e02e4ae not found: ID does not exist" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.826347 4685 scope.go:117] "RemoveContainer" containerID="a96cbe9d8a58e4ea92017d1e508c6421ac7778d67d74358efa5cf8439e13f03c" Jan 28 12:51:10 crc kubenswrapper[4685]: E0128 12:51:10.826662 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a96cbe9d8a58e4ea92017d1e508c6421ac7778d67d74358efa5cf8439e13f03c\": container with ID starting with a96cbe9d8a58e4ea92017d1e508c6421ac7778d67d74358efa5cf8439e13f03c not found: ID does not exist" containerID="a96cbe9d8a58e4ea92017d1e508c6421ac7778d67d74358efa5cf8439e13f03c" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.826691 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a96cbe9d8a58e4ea92017d1e508c6421ac7778d67d74358efa5cf8439e13f03c"} err="failed to get container status \"a96cbe9d8a58e4ea92017d1e508c6421ac7778d67d74358efa5cf8439e13f03c\": rpc error: code = NotFound desc = could not find container \"a96cbe9d8a58e4ea92017d1e508c6421ac7778d67d74358efa5cf8439e13f03c\": container with ID starting with a96cbe9d8a58e4ea92017d1e508c6421ac7778d67d74358efa5cf8439e13f03c not found: ID does not exist" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.826710 4685 scope.go:117] "RemoveContainer" containerID="960f866769de4e206a65af40c58c86c14eacd0977a5990b718f8b812eb19d84e" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.847350 4685 scope.go:117] "RemoveContainer" containerID="d780e3f653d82aa184f0f1d229ea4bdf5663c481bb2e01dc9dda590a13888631" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.865709 4685 scope.go:117] "RemoveContainer" containerID="960f866769de4e206a65af40c58c86c14eacd0977a5990b718f8b812eb19d84e" Jan 28 12:51:10 crc kubenswrapper[4685]: E0128 12:51:10.866151 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"960f866769de4e206a65af40c58c86c14eacd0977a5990b718f8b812eb19d84e\": container with ID starting with 960f866769de4e206a65af40c58c86c14eacd0977a5990b718f8b812eb19d84e not found: ID does not exist" containerID="960f866769de4e206a65af40c58c86c14eacd0977a5990b718f8b812eb19d84e" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.866224 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"960f866769de4e206a65af40c58c86c14eacd0977a5990b718f8b812eb19d84e"} err="failed to get container status \"960f866769de4e206a65af40c58c86c14eacd0977a5990b718f8b812eb19d84e\": rpc error: code = NotFound desc = could not find container \"960f866769de4e206a65af40c58c86c14eacd0977a5990b718f8b812eb19d84e\": container with ID starting with 960f866769de4e206a65af40c58c86c14eacd0977a5990b718f8b812eb19d84e not found: ID does not exist" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.866244 4685 scope.go:117] "RemoveContainer" containerID="d780e3f653d82aa184f0f1d229ea4bdf5663c481bb2e01dc9dda590a13888631" Jan 28 12:51:10 crc kubenswrapper[4685]: E0128 12:51:10.866417 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d780e3f653d82aa184f0f1d229ea4bdf5663c481bb2e01dc9dda590a13888631\": container with ID starting with 
d780e3f653d82aa184f0f1d229ea4bdf5663c481bb2e01dc9dda590a13888631 not found: ID does not exist" containerID="d780e3f653d82aa184f0f1d229ea4bdf5663c481bb2e01dc9dda590a13888631" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.866445 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d780e3f653d82aa184f0f1d229ea4bdf5663c481bb2e01dc9dda590a13888631"} err="failed to get container status \"d780e3f653d82aa184f0f1d229ea4bdf5663c481bb2e01dc9dda590a13888631\": rpc error: code = NotFound desc = could not find container \"d780e3f653d82aa184f0f1d229ea4bdf5663c481bb2e01dc9dda590a13888631\": container with ID starting with d780e3f653d82aa184f0f1d229ea4bdf5663c481bb2e01dc9dda590a13888631 not found: ID does not exist" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.866460 4685 scope.go:117] "RemoveContainer" containerID="0a16cd863c76b6716670c0cf89d76f1fc57e3f42c869f206cbd8a84151d98ca9" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.888612 4685 scope.go:117] "RemoveContainer" containerID="dceedf8fe963926559d668e21f944b847fdf72ca1781a4acf730e7709e791776" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.904348 4685 scope.go:117] "RemoveContainer" containerID="0a16cd863c76b6716670c0cf89d76f1fc57e3f42c869f206cbd8a84151d98ca9" Jan 28 12:51:10 crc kubenswrapper[4685]: E0128 12:51:10.904819 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a16cd863c76b6716670c0cf89d76f1fc57e3f42c869f206cbd8a84151d98ca9\": container with ID starting with 0a16cd863c76b6716670c0cf89d76f1fc57e3f42c869f206cbd8a84151d98ca9 not found: ID does not exist" containerID="0a16cd863c76b6716670c0cf89d76f1fc57e3f42c869f206cbd8a84151d98ca9" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.904864 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a16cd863c76b6716670c0cf89d76f1fc57e3f42c869f206cbd8a84151d98ca9"} err="failed to get container status \"0a16cd863c76b6716670c0cf89d76f1fc57e3f42c869f206cbd8a84151d98ca9\": rpc error: code = NotFound desc = could not find container \"0a16cd863c76b6716670c0cf89d76f1fc57e3f42c869f206cbd8a84151d98ca9\": container with ID starting with 0a16cd863c76b6716670c0cf89d76f1fc57e3f42c869f206cbd8a84151d98ca9 not found: ID does not exist" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.904893 4685 scope.go:117] "RemoveContainer" containerID="dceedf8fe963926559d668e21f944b847fdf72ca1781a4acf730e7709e791776" Jan 28 12:51:10 crc kubenswrapper[4685]: E0128 12:51:10.905214 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dceedf8fe963926559d668e21f944b847fdf72ca1781a4acf730e7709e791776\": container with ID starting with dceedf8fe963926559d668e21f944b847fdf72ca1781a4acf730e7709e791776 not found: ID does not exist" containerID="dceedf8fe963926559d668e21f944b847fdf72ca1781a4acf730e7709e791776" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.905230 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dceedf8fe963926559d668e21f944b847fdf72ca1781a4acf730e7709e791776"} err="failed to get container status \"dceedf8fe963926559d668e21f944b847fdf72ca1781a4acf730e7709e791776\": rpc error: code = NotFound desc = could not find container \"dceedf8fe963926559d668e21f944b847fdf72ca1781a4acf730e7709e791776\": container with ID starting with 
dceedf8fe963926559d668e21f944b847fdf72ca1781a4acf730e7709e791776 not found: ID does not exist" Jan 28 12:51:10 crc kubenswrapper[4685]: I0128 12:51:10.996222 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-2"] Jan 28 12:51:11 crc kubenswrapper[4685]: I0128 12:51:11.005969 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-2"] Jan 28 12:51:11 crc kubenswrapper[4685]: I0128 12:51:11.546096 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:51:11 crc kubenswrapper[4685]: E0128 12:51:11.546376 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:51:11 crc kubenswrapper[4685]: I0128 12:51:11.838715 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:51:11 crc kubenswrapper[4685]: I0128 12:51:11.839007 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-0" podUID="4c0d265d-5416-4aab-b089-1b9a8356fb92" containerName="glance-log" containerID="cri-o://09fb36ad5dcdee08f92bdfde889e3281e2e8f95570f5b5d4bf8c6fe4df6a2ddd" gracePeriod=30 Jan 28 12:51:11 crc kubenswrapper[4685]: I0128 12:51:11.839091 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-0" podUID="4c0d265d-5416-4aab-b089-1b9a8356fb92" containerName="glance-httpd" containerID="cri-o://46e7ba47a37ad423a0a711c965eaf5123e70a498c0d0a3036d6b494dc036d880" gracePeriod=30 Jan 28 12:51:12 crc kubenswrapper[4685]: I0128 12:51:12.375559 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:51:12 crc kubenswrapper[4685]: I0128 12:51:12.376046 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-0" podUID="77743e75-2a16-43c9-9d38-a38a69bda189" containerName="glance-log" containerID="cri-o://9886f3be435b86f11e3fb0ba6aa81563d78d81872cbf05b6283a2df24f9af9fd" gracePeriod=30 Jan 28 12:51:12 crc kubenswrapper[4685]: I0128 12:51:12.376160 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-0" podUID="77743e75-2a16-43c9-9d38-a38a69bda189" containerName="glance-httpd" containerID="cri-o://1d0f87b7c9e13938d9827df9d7e56a36f908d01d8d71150375bb2a13da302ade" gracePeriod=30 Jan 28 12:51:12 crc kubenswrapper[4685]: I0128 12:51:12.554958 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="180dae19-d035-4741-b0e2-9eeff4224e07" path="/var/lib/kubelet/pods/180dae19-d035-4741-b0e2-9eeff4224e07/volumes" Jan 28 12:51:12 crc kubenswrapper[4685]: I0128 12:51:12.555741 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="728779c7-10ec-437a-a7c5-1ef8b27fa410" path="/var/lib/kubelet/pods/728779c7-10ec-437a-a7c5-1ef8b27fa410/volumes" Jan 28 12:51:12 crc kubenswrapper[4685]: I0128 12:51:12.556402 4685 kubelet_volumes.go:163] "Cleaned up orphaned 
pod volumes dir" podUID="a83d5fc7-63c1-40f5-827c-54ee37462203" path="/var/lib/kubelet/pods/a83d5fc7-63c1-40f5-827c-54ee37462203/volumes" Jan 28 12:51:12 crc kubenswrapper[4685]: I0128 12:51:12.557721 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3edf321-8f65-446f-876c-30420115c2a2" path="/var/lib/kubelet/pods/f3edf321-8f65-446f-876c-30420115c2a2/volumes" Jan 28 12:51:12 crc kubenswrapper[4685]: I0128 12:51:12.686517 4685 generic.go:334] "Generic (PLEG): container finished" podID="4c0d265d-5416-4aab-b089-1b9a8356fb92" containerID="09fb36ad5dcdee08f92bdfde889e3281e2e8f95570f5b5d4bf8c6fe4df6a2ddd" exitCode=143 Jan 28 12:51:12 crc kubenswrapper[4685]: I0128 12:51:12.686632 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"4c0d265d-5416-4aab-b089-1b9a8356fb92","Type":"ContainerDied","Data":"09fb36ad5dcdee08f92bdfde889e3281e2e8f95570f5b5d4bf8c6fe4df6a2ddd"} Jan 28 12:51:12 crc kubenswrapper[4685]: I0128 12:51:12.689031 4685 generic.go:334] "Generic (PLEG): container finished" podID="77743e75-2a16-43c9-9d38-a38a69bda189" containerID="9886f3be435b86f11e3fb0ba6aa81563d78d81872cbf05b6283a2df24f9af9fd" exitCode=143 Jan 28 12:51:12 crc kubenswrapper[4685]: I0128 12:51:12.689073 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"77743e75-2a16-43c9-9d38-a38a69bda189","Type":"ContainerDied","Data":"9886f3be435b86f11e3fb0ba6aa81563d78d81872cbf05b6283a2df24f9af9fd"} Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.301357 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.366490 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-sys\") pod \"4c0d265d-5416-4aab-b089-1b9a8356fb92\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.366559 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4c0d265d-5416-4aab-b089-1b9a8356fb92-httpd-run\") pod \"4c0d265d-5416-4aab-b089-1b9a8356fb92\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.366594 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-dev\") pod \"4c0d265d-5416-4aab-b089-1b9a8356fb92\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.366639 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-var-locks-brick\") pod \"4c0d265d-5416-4aab-b089-1b9a8356fb92\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.366668 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c0d265d-5416-4aab-b089-1b9a8356fb92-config-data\") pod \"4c0d265d-5416-4aab-b089-1b9a8356fb92\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.366693 4685 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage18-crc\") pod \"4c0d265d-5416-4aab-b089-1b9a8356fb92\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.366715 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"4c0d265d-5416-4aab-b089-1b9a8356fb92\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.366744 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-lib-modules\") pod \"4c0d265d-5416-4aab-b089-1b9a8356fb92\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.366761 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xfbtq\" (UniqueName: \"kubernetes.io/projected/4c0d265d-5416-4aab-b089-1b9a8356fb92-kube-api-access-xfbtq\") pod \"4c0d265d-5416-4aab-b089-1b9a8356fb92\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.366807 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c0d265d-5416-4aab-b089-1b9a8356fb92-logs\") pod \"4c0d265d-5416-4aab-b089-1b9a8356fb92\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.366823 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-etc-iscsi\") pod \"4c0d265d-5416-4aab-b089-1b9a8356fb92\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.366837 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c0d265d-5416-4aab-b089-1b9a8356fb92-scripts\") pod \"4c0d265d-5416-4aab-b089-1b9a8356fb92\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.366863 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-etc-nvme\") pod \"4c0d265d-5416-4aab-b089-1b9a8356fb92\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.366910 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-run\") pod \"4c0d265d-5416-4aab-b089-1b9a8356fb92\" (UID: \"4c0d265d-5416-4aab-b089-1b9a8356fb92\") " Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.367218 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-run" (OuterVolumeSpecName: "run") pod "4c0d265d-5416-4aab-b089-1b9a8356fb92" (UID: "4c0d265d-5416-4aab-b089-1b9a8356fb92"). InnerVolumeSpecName "run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.367252 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-sys" (OuterVolumeSpecName: "sys") pod "4c0d265d-5416-4aab-b089-1b9a8356fb92" (UID: "4c0d265d-5416-4aab-b089-1b9a8356fb92"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.367528 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c0d265d-5416-4aab-b089-1b9a8356fb92-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "4c0d265d-5416-4aab-b089-1b9a8356fb92" (UID: "4c0d265d-5416-4aab-b089-1b9a8356fb92"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.367562 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-dev" (OuterVolumeSpecName: "dev") pod "4c0d265d-5416-4aab-b089-1b9a8356fb92" (UID: "4c0d265d-5416-4aab-b089-1b9a8356fb92"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.367581 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "4c0d265d-5416-4aab-b089-1b9a8356fb92" (UID: "4c0d265d-5416-4aab-b089-1b9a8356fb92"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.369911 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "4c0d265d-5416-4aab-b089-1b9a8356fb92" (UID: "4c0d265d-5416-4aab-b089-1b9a8356fb92"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.370129 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "4c0d265d-5416-4aab-b089-1b9a8356fb92" (UID: "4c0d265d-5416-4aab-b089-1b9a8356fb92"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.370232 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "4c0d265d-5416-4aab-b089-1b9a8356fb92" (UID: "4c0d265d-5416-4aab-b089-1b9a8356fb92"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.370503 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c0d265d-5416-4aab-b089-1b9a8356fb92-logs" (OuterVolumeSpecName: "logs") pod "4c0d265d-5416-4aab-b089-1b9a8356fb92" (UID: "4c0d265d-5416-4aab-b089-1b9a8356fb92"). InnerVolumeSpecName "logs". 
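
The grace-period kills a few records above and the PLEG "container finished" events around them decode neatly: the glance-log containers exit with code 143, i.e. 128+15, the common 128+N convention for death by signal 15 (SIGTERM), which is what the kubelet delivers when it kills a container with gracePeriod=30; the glance-httpd containers instead exit 0 a few records below, meaning they shut down cleanly before the grace period expired. A small sketch of that decoding, assuming the 128+N convention:

    package main

    import "fmt"

    // Decode container exit codes as they appear in the PLEG records:
    // codes above 128 follow the 128+N "killed by signal N" convention.
    func describeExit(code int) string {
        if code > 128 {
            return fmt.Sprintf("terminated by signal %d", code-128) // 143 -> 15 (SIGTERM)
        }
        return fmt.Sprintf("exited with status %d", code)
    }

    func main() {
        for _, c := range []int{143, 0} {
            fmt.Printf("exitCode=%d: %s\n", c, describeExit(c))
        }
    }
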
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.373466 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c0d265d-5416-4aab-b089-1b9a8356fb92-kube-api-access-xfbtq" (OuterVolumeSpecName: "kube-api-access-xfbtq") pod "4c0d265d-5416-4aab-b089-1b9a8356fb92" (UID: "4c0d265d-5416-4aab-b089-1b9a8356fb92"). InnerVolumeSpecName "kube-api-access-xfbtq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.373880 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c0d265d-5416-4aab-b089-1b9a8356fb92-scripts" (OuterVolumeSpecName: "scripts") pod "4c0d265d-5416-4aab-b089-1b9a8356fb92" (UID: "4c0d265d-5416-4aab-b089-1b9a8356fb92"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.374535 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage18-crc" (OuterVolumeSpecName: "glance") pod "4c0d265d-5416-4aab-b089-1b9a8356fb92" (UID: "4c0d265d-5416-4aab-b089-1b9a8356fb92"). InnerVolumeSpecName "local-storage18-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.385010 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage14-crc" (OuterVolumeSpecName: "glance-cache") pod "4c0d265d-5416-4aab-b089-1b9a8356fb92" (UID: "4c0d265d-5416-4aab-b089-1b9a8356fb92"). InnerVolumeSpecName "local-storage14-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.403957 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c0d265d-5416-4aab-b089-1b9a8356fb92-config-data" (OuterVolumeSpecName: "config-data") pod "4c0d265d-5416-4aab-b089-1b9a8356fb92" (UID: "4c0d265d-5416-4aab-b089-1b9a8356fb92"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.468419 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.468458 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c0d265d-5416-4aab-b089-1b9a8356fb92-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.468486 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage18-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage18-crc\") on node \"crc\" " Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.468498 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") on node \"crc\" " Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.468508 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.468519 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xfbtq\" (UniqueName: \"kubernetes.io/projected/4c0d265d-5416-4aab-b089-1b9a8356fb92-kube-api-access-xfbtq\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.468530 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c0d265d-5416-4aab-b089-1b9a8356fb92-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.468538 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.468546 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c0d265d-5416-4aab-b089-1b9a8356fb92-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.468553 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.468561 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.468569 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.468613 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4c0d265d-5416-4aab-b089-1b9a8356fb92-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.468622 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: 
\"kubernetes.io/host-path/4c0d265d-5416-4aab-b089-1b9a8356fb92-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.483689 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage14-crc" (UniqueName: "kubernetes.io/local-volume/local-storage14-crc") on node "crc" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.486813 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage18-crc" (UniqueName: "kubernetes.io/local-volume/local-storage18-crc") on node "crc" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.569903 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage18-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage18-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.569958 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.718748 4685 generic.go:334] "Generic (PLEG): container finished" podID="77743e75-2a16-43c9-9d38-a38a69bda189" containerID="1d0f87b7c9e13938d9827df9d7e56a36f908d01d8d71150375bb2a13da302ade" exitCode=0 Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.718807 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"77743e75-2a16-43c9-9d38-a38a69bda189","Type":"ContainerDied","Data":"1d0f87b7c9e13938d9827df9d7e56a36f908d01d8d71150375bb2a13da302ade"} Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.720535 4685 generic.go:334] "Generic (PLEG): container finished" podID="4c0d265d-5416-4aab-b089-1b9a8356fb92" containerID="46e7ba47a37ad423a0a711c965eaf5123e70a498c0d0a3036d6b494dc036d880" exitCode=0 Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.720576 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"4c0d265d-5416-4aab-b089-1b9a8356fb92","Type":"ContainerDied","Data":"46e7ba47a37ad423a0a711c965eaf5123e70a498c0d0a3036d6b494dc036d880"} Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.720615 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"4c0d265d-5416-4aab-b089-1b9a8356fb92","Type":"ContainerDied","Data":"bb1c2db715a93321bc7b02ae5a641ae05ff79c7f783729da4ec5345b3d3a03b7"} Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.720631 4685 scope.go:117] "RemoveContainer" containerID="46e7ba47a37ad423a0a711c965eaf5123e70a498c0d0a3036d6b494dc036d880" Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.720628 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-0"
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.771149 4685 scope.go:117] "RemoveContainer" containerID="09fb36ad5dcdee08f92bdfde889e3281e2e8f95570f5b5d4bf8c6fe4df6a2ddd"
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.777365 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"]
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.786101 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"]
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.793022 4685 scope.go:117] "RemoveContainer" containerID="46e7ba47a37ad423a0a711c965eaf5123e70a498c0d0a3036d6b494dc036d880"
Jan 28 12:51:15 crc kubenswrapper[4685]: E0128 12:51:15.793668 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46e7ba47a37ad423a0a711c965eaf5123e70a498c0d0a3036d6b494dc036d880\": container with ID starting with 46e7ba47a37ad423a0a711c965eaf5123e70a498c0d0a3036d6b494dc036d880 not found: ID does not exist" containerID="46e7ba47a37ad423a0a711c965eaf5123e70a498c0d0a3036d6b494dc036d880"
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.793707 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46e7ba47a37ad423a0a711c965eaf5123e70a498c0d0a3036d6b494dc036d880"} err="failed to get container status \"46e7ba47a37ad423a0a711c965eaf5123e70a498c0d0a3036d6b494dc036d880\": rpc error: code = NotFound desc = could not find container \"46e7ba47a37ad423a0a711c965eaf5123e70a498c0d0a3036d6b494dc036d880\": container with ID starting with 46e7ba47a37ad423a0a711c965eaf5123e70a498c0d0a3036d6b494dc036d880 not found: ID does not exist"
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.793729 4685 scope.go:117] "RemoveContainer" containerID="09fb36ad5dcdee08f92bdfde889e3281e2e8f95570f5b5d4bf8c6fe4df6a2ddd"
Jan 28 12:51:15 crc kubenswrapper[4685]: E0128 12:51:15.794119 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09fb36ad5dcdee08f92bdfde889e3281e2e8f95570f5b5d4bf8c6fe4df6a2ddd\": container with ID starting with 09fb36ad5dcdee08f92bdfde889e3281e2e8f95570f5b5d4bf8c6fe4df6a2ddd not found: ID does not exist" containerID="09fb36ad5dcdee08f92bdfde889e3281e2e8f95570f5b5d4bf8c6fe4df6a2ddd"
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.794185 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09fb36ad5dcdee08f92bdfde889e3281e2e8f95570f5b5d4bf8c6fe4df6a2ddd"} err="failed to get container status \"09fb36ad5dcdee08f92bdfde889e3281e2e8f95570f5b5d4bf8c6fe4df6a2ddd\": rpc error: code = NotFound desc = could not find container \"09fb36ad5dcdee08f92bdfde889e3281e2e8f95570f5b5d4bf8c6fe4df6a2ddd\": container with ID starting with 09fb36ad5dcdee08f92bdfde889e3281e2e8f95570f5b5d4bf8c6fe4df6a2ddd not found: ID does not exist"
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.826048 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0"
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.873908 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"77743e75-2a16-43c9-9d38-a38a69bda189\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") "
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.873969 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-var-locks-brick\") pod \"77743e75-2a16-43c9-9d38-a38a69bda189\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") "
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.873988 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-sys\") pod \"77743e75-2a16-43c9-9d38-a38a69bda189\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") "
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874035 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-etc-nvme\") pod \"77743e75-2a16-43c9-9d38-a38a69bda189\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") "
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874071 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77743e75-2a16-43c9-9d38-a38a69bda189-config-data\") pod \"77743e75-2a16-43c9-9d38-a38a69bda189\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") "
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874087 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-run\") pod \"77743e75-2a16-43c9-9d38-a38a69bda189\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") "
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874110 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-lib-modules\") pod \"77743e75-2a16-43c9-9d38-a38a69bda189\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") "
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874126 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-dev\") pod \"77743e75-2a16-43c9-9d38-a38a69bda189\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") "
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874147 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4kndj\" (UniqueName: \"kubernetes.io/projected/77743e75-2a16-43c9-9d38-a38a69bda189-kube-api-access-4kndj\") pod \"77743e75-2a16-43c9-9d38-a38a69bda189\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") "
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874186 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage19-crc\") pod \"77743e75-2a16-43c9-9d38-a38a69bda189\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") "
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874211 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/77743e75-2a16-43c9-9d38-a38a69bda189-httpd-run\") pod \"77743e75-2a16-43c9-9d38-a38a69bda189\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") "
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874254 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-etc-iscsi\") pod \"77743e75-2a16-43c9-9d38-a38a69bda189\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") "
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874275 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77743e75-2a16-43c9-9d38-a38a69bda189-scripts\") pod \"77743e75-2a16-43c9-9d38-a38a69bda189\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") "
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874329 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77743e75-2a16-43c9-9d38-a38a69bda189-logs\") pod \"77743e75-2a16-43c9-9d38-a38a69bda189\" (UID: \"77743e75-2a16-43c9-9d38-a38a69bda189\") "
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874392 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "77743e75-2a16-43c9-9d38-a38a69bda189" (UID: "77743e75-2a16-43c9-9d38-a38a69bda189"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874441 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "77743e75-2a16-43c9-9d38-a38a69bda189" (UID: "77743e75-2a16-43c9-9d38-a38a69bda189"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874511 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "77743e75-2a16-43c9-9d38-a38a69bda189" (UID: "77743e75-2a16-43c9-9d38-a38a69bda189"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874535 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-sys" (OuterVolumeSpecName: "sys") pod "77743e75-2a16-43c9-9d38-a38a69bda189" (UID: "77743e75-2a16-43c9-9d38-a38a69bda189"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874684 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-var-locks-brick\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874696 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-sys\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874706 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-etc-nvme\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874714 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-lib-modules\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874739 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-run" (OuterVolumeSpecName: "run") pod "77743e75-2a16-43c9-9d38-a38a69bda189" (UID: "77743e75-2a16-43c9-9d38-a38a69bda189"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874759 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-dev" (OuterVolumeSpecName: "dev") pod "77743e75-2a16-43c9-9d38-a38a69bda189" (UID: "77743e75-2a16-43c9-9d38-a38a69bda189"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874776 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "77743e75-2a16-43c9-9d38-a38a69bda189" (UID: "77743e75-2a16-43c9-9d38-a38a69bda189"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.874983 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77743e75-2a16-43c9-9d38-a38a69bda189-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "77743e75-2a16-43c9-9d38-a38a69bda189" (UID: "77743e75-2a16-43c9-9d38-a38a69bda189"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.875563 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77743e75-2a16-43c9-9d38-a38a69bda189-logs" (OuterVolumeSpecName: "logs") pod "77743e75-2a16-43c9-9d38-a38a69bda189" (UID: "77743e75-2a16-43c9-9d38-a38a69bda189"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.879050 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "77743e75-2a16-43c9-9d38-a38a69bda189" (UID: "77743e75-2a16-43c9-9d38-a38a69bda189"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.879053 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77743e75-2a16-43c9-9d38-a38a69bda189-scripts" (OuterVolumeSpecName: "scripts") pod "77743e75-2a16-43c9-9d38-a38a69bda189" (UID: "77743e75-2a16-43c9-9d38-a38a69bda189"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.879643 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage19-crc" (OuterVolumeSpecName: "glance-cache") pod "77743e75-2a16-43c9-9d38-a38a69bda189" (UID: "77743e75-2a16-43c9-9d38-a38a69bda189"). InnerVolumeSpecName "local-storage19-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.882139 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77743e75-2a16-43c9-9d38-a38a69bda189-kube-api-access-4kndj" (OuterVolumeSpecName: "kube-api-access-4kndj") pod "77743e75-2a16-43c9-9d38-a38a69bda189" (UID: "77743e75-2a16-43c9-9d38-a38a69bda189"). InnerVolumeSpecName "kube-api-access-4kndj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.911524 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77743e75-2a16-43c9-9d38-a38a69bda189-config-data" (OuterVolumeSpecName: "config-data") pod "77743e75-2a16-43c9-9d38-a38a69bda189" (UID: "77743e75-2a16-43c9-9d38-a38a69bda189"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.975733 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77743e75-2a16-43c9-9d38-a38a69bda189-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.975776 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-run\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.975816 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-dev\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.975829 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4kndj\" (UniqueName: \"kubernetes.io/projected/77743e75-2a16-43c9-9d38-a38a69bda189-kube-api-access-4kndj\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.975867 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage19-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage19-crc\") on node \"crc\" "
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.975884 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/77743e75-2a16-43c9-9d38-a38a69bda189-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.975893 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/77743e75-2a16-43c9-9d38-a38a69bda189-etc-iscsi\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.975904 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77743e75-2a16-43c9-9d38-a38a69bda189-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.975913 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77743e75-2a16-43c9-9d38-a38a69bda189-logs\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.975930 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" "
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.989451 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage19-crc" (UniqueName: "kubernetes.io/local-volume/local-storage19-crc") on node "crc"
Jan 28 12:51:15 crc kubenswrapper[4685]: I0128 12:51:15.990891 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc"
Jan 28 12:51:16 crc kubenswrapper[4685]: I0128 12:51:16.076940 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage19-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage19-crc\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:16 crc kubenswrapper[4685]: I0128 12:51:16.076989 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:16 crc kubenswrapper[4685]: I0128 12:51:16.577026 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c0d265d-5416-4aab-b089-1b9a8356fb92" path="/var/lib/kubelet/pods/4c0d265d-5416-4aab-b089-1b9a8356fb92/volumes"
Jan 28 12:51:16 crc kubenswrapper[4685]: I0128 12:51:16.735606 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"77743e75-2a16-43c9-9d38-a38a69bda189","Type":"ContainerDied","Data":"bd7a7958704853c4b0ce2f5092fdd457b32a83f4890fb207edd0509582ea0b94"}
Jan 28 12:51:16 crc kubenswrapper[4685]: I0128 12:51:16.735691 4685 scope.go:117] "RemoveContainer" containerID="1d0f87b7c9e13938d9827df9d7e56a36f908d01d8d71150375bb2a13da302ade"
Jan 28 12:51:16 crc kubenswrapper[4685]: I0128 12:51:16.735719 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0"
Jan 28 12:51:16 crc kubenswrapper[4685]: I0128 12:51:16.763614 4685 scope.go:117] "RemoveContainer" containerID="9886f3be435b86f11e3fb0ba6aa81563d78d81872cbf05b6283a2df24f9af9fd"
Jan 28 12:51:16 crc kubenswrapper[4685]: I0128 12:51:16.767450 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"]
Jan 28 12:51:16 crc kubenswrapper[4685]: I0128 12:51:16.776047 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"]
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.495571 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-db-sync-kxlwl"]
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.501113 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-db-sync-kxlwl"]
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.527721 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance9468-account-delete-2fvp4"]
Jan 28 12:51:17 crc kubenswrapper[4685]: E0128 12:51:17.528000 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77743e75-2a16-43c9-9d38-a38a69bda189" containerName="glance-log"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528015 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="77743e75-2a16-43c9-9d38-a38a69bda189" containerName="glance-log"
Jan 28 12:51:17 crc kubenswrapper[4685]: E0128 12:51:17.528030 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c0d265d-5416-4aab-b089-1b9a8356fb92" containerName="glance-log"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528035 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c0d265d-5416-4aab-b089-1b9a8356fb92" containerName="glance-log"
Jan 28 12:51:17 crc kubenswrapper[4685]: E0128 12:51:17.528048 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a83d5fc7-63c1-40f5-827c-54ee37462203" containerName="glance-log"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528054 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="a83d5fc7-63c1-40f5-827c-54ee37462203" containerName="glance-log"
Jan 28 12:51:17 crc kubenswrapper[4685]: E0128 12:51:17.528066 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77743e75-2a16-43c9-9d38-a38a69bda189" containerName="glance-httpd"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528071 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="77743e75-2a16-43c9-9d38-a38a69bda189" containerName="glance-httpd"
Jan 28 12:51:17 crc kubenswrapper[4685]: E0128 12:51:17.528083 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="728779c7-10ec-437a-a7c5-1ef8b27fa410" containerName="glance-httpd"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528088 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="728779c7-10ec-437a-a7c5-1ef8b27fa410" containerName="glance-httpd"
Jan 28 12:51:17 crc kubenswrapper[4685]: E0128 12:51:17.528097 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="180dae19-d035-4741-b0e2-9eeff4224e07" containerName="glance-log"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528103 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="180dae19-d035-4741-b0e2-9eeff4224e07" containerName="glance-log"
Jan 28 12:51:17 crc kubenswrapper[4685]: E0128 12:51:17.528113 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="180dae19-d035-4741-b0e2-9eeff4224e07" containerName="glance-httpd"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528118 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="180dae19-d035-4741-b0e2-9eeff4224e07" containerName="glance-httpd"
Jan 28 12:51:17 crc kubenswrapper[4685]: E0128 12:51:17.528126 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a83d5fc7-63c1-40f5-827c-54ee37462203" containerName="glance-httpd"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528133 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="a83d5fc7-63c1-40f5-827c-54ee37462203" containerName="glance-httpd"
Jan 28 12:51:17 crc kubenswrapper[4685]: E0128 12:51:17.528141 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c0d265d-5416-4aab-b089-1b9a8356fb92" containerName="glance-httpd"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528146 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c0d265d-5416-4aab-b089-1b9a8356fb92" containerName="glance-httpd"
Jan 28 12:51:17 crc kubenswrapper[4685]: E0128 12:51:17.528158 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="728779c7-10ec-437a-a7c5-1ef8b27fa410" containerName="glance-log"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528163 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="728779c7-10ec-437a-a7c5-1ef8b27fa410" containerName="glance-log"
Jan 28 12:51:17 crc kubenswrapper[4685]: E0128 12:51:17.528265 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3edf321-8f65-446f-876c-30420115c2a2" containerName="glance-log"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528271 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3edf321-8f65-446f-876c-30420115c2a2" containerName="glance-log"
Jan 28 12:51:17 crc kubenswrapper[4685]: E0128 12:51:17.528280 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3edf321-8f65-446f-876c-30420115c2a2" containerName="glance-httpd"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528285 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3edf321-8f65-446f-876c-30420115c2a2" containerName="glance-httpd"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528398 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="77743e75-2a16-43c9-9d38-a38a69bda189" containerName="glance-log"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528408 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="728779c7-10ec-437a-a7c5-1ef8b27fa410" containerName="glance-log"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528417 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="a83d5fc7-63c1-40f5-827c-54ee37462203" containerName="glance-httpd"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528426 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3edf321-8f65-446f-876c-30420115c2a2" containerName="glance-httpd"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528434 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3edf321-8f65-446f-876c-30420115c2a2" containerName="glance-log"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528440 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="77743e75-2a16-43c9-9d38-a38a69bda189" containerName="glance-httpd"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528446 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c0d265d-5416-4aab-b089-1b9a8356fb92" containerName="glance-log"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528453 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="180dae19-d035-4741-b0e2-9eeff4224e07" containerName="glance-log"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528461 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="a83d5fc7-63c1-40f5-827c-54ee37462203" containerName="glance-log"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528469 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c0d265d-5416-4aab-b089-1b9a8356fb92" containerName="glance-httpd"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528475 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="728779c7-10ec-437a-a7c5-1ef8b27fa410" containerName="glance-httpd"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528483 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="180dae19-d035-4741-b0e2-9eeff4224e07" containerName="glance-httpd"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.528894 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance9468-account-delete-2fvp4"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.535704 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance9468-account-delete-2fvp4"]
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.611020 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab8d4887-9a86-4453-a1b1-110a2f8b9210-operator-scripts\") pod \"glance9468-account-delete-2fvp4\" (UID: \"ab8d4887-9a86-4453-a1b1-110a2f8b9210\") " pod="glance-kuttl-tests/glance9468-account-delete-2fvp4"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.611272 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q69cl\" (UniqueName: \"kubernetes.io/projected/ab8d4887-9a86-4453-a1b1-110a2f8b9210-kube-api-access-q69cl\") pod \"glance9468-account-delete-2fvp4\" (UID: \"ab8d4887-9a86-4453-a1b1-110a2f8b9210\") " pod="glance-kuttl-tests/glance9468-account-delete-2fvp4"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.712395 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab8d4887-9a86-4453-a1b1-110a2f8b9210-operator-scripts\") pod \"glance9468-account-delete-2fvp4\" (UID: \"ab8d4887-9a86-4453-a1b1-110a2f8b9210\") " pod="glance-kuttl-tests/glance9468-account-delete-2fvp4"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.712499 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q69cl\" (UniqueName: \"kubernetes.io/projected/ab8d4887-9a86-4453-a1b1-110a2f8b9210-kube-api-access-q69cl\") pod \"glance9468-account-delete-2fvp4\" (UID: \"ab8d4887-9a86-4453-a1b1-110a2f8b9210\") " pod="glance-kuttl-tests/glance9468-account-delete-2fvp4"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.713240 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab8d4887-9a86-4453-a1b1-110a2f8b9210-operator-scripts\") pod \"glance9468-account-delete-2fvp4\" (UID: \"ab8d4887-9a86-4453-a1b1-110a2f8b9210\") " pod="glance-kuttl-tests/glance9468-account-delete-2fvp4"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.741107 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q69cl\" (UniqueName: \"kubernetes.io/projected/ab8d4887-9a86-4453-a1b1-110a2f8b9210-kube-api-access-q69cl\") pod \"glance9468-account-delete-2fvp4\" (UID: \"ab8d4887-9a86-4453-a1b1-110a2f8b9210\") " pod="glance-kuttl-tests/glance9468-account-delete-2fvp4"
Jan 28 12:51:17 crc kubenswrapper[4685]: I0128 12:51:17.844103 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance9468-account-delete-2fvp4"
Jan 28 12:51:18 crc kubenswrapper[4685]: I0128 12:51:18.261678 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance9468-account-delete-2fvp4"]
Jan 28 12:51:18 crc kubenswrapper[4685]: I0128 12:51:18.554317 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77743e75-2a16-43c9-9d38-a38a69bda189" path="/var/lib/kubelet/pods/77743e75-2a16-43c9-9d38-a38a69bda189/volumes"
Jan 28 12:51:18 crc kubenswrapper[4685]: I0128 12:51:18.554951 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2c27abf-bd6d-467c-aa79-b0c267c57c3b" path="/var/lib/kubelet/pods/f2c27abf-bd6d-467c-aa79-b0c267c57c3b/volumes"
Jan 28 12:51:18 crc kubenswrapper[4685]: I0128 12:51:18.755798 4685 generic.go:334] "Generic (PLEG): container finished" podID="ab8d4887-9a86-4453-a1b1-110a2f8b9210" containerID="cade24f3843ef21761779849be56f06f2eebe51e3ef70f09a45b83c7ece0b5bd" exitCode=0
Jan 28 12:51:18 crc kubenswrapper[4685]: I0128 12:51:18.755849 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance9468-account-delete-2fvp4" event={"ID":"ab8d4887-9a86-4453-a1b1-110a2f8b9210","Type":"ContainerDied","Data":"cade24f3843ef21761779849be56f06f2eebe51e3ef70f09a45b83c7ece0b5bd"}
Jan 28 12:51:18 crc kubenswrapper[4685]: I0128 12:51:18.755902 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance9468-account-delete-2fvp4" event={"ID":"ab8d4887-9a86-4453-a1b1-110a2f8b9210","Type":"ContainerStarted","Data":"0ce754080586d27e2ad5f89b7f5b89d23a15a09ddcf5fbc59ad587addf032adf"}
Jan 28 12:51:20 crc kubenswrapper[4685]: I0128 12:51:20.002065 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance9468-account-delete-2fvp4"
Jan 28 12:51:20 crc kubenswrapper[4685]: I0128 12:51:20.048293 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q69cl\" (UniqueName: \"kubernetes.io/projected/ab8d4887-9a86-4453-a1b1-110a2f8b9210-kube-api-access-q69cl\") pod \"ab8d4887-9a86-4453-a1b1-110a2f8b9210\" (UID: \"ab8d4887-9a86-4453-a1b1-110a2f8b9210\") "
Jan 28 12:51:20 crc kubenswrapper[4685]: I0128 12:51:20.048474 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab8d4887-9a86-4453-a1b1-110a2f8b9210-operator-scripts\") pod \"ab8d4887-9a86-4453-a1b1-110a2f8b9210\" (UID: \"ab8d4887-9a86-4453-a1b1-110a2f8b9210\") "
Jan 28 12:51:20 crc kubenswrapper[4685]: I0128 12:51:20.049101 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab8d4887-9a86-4453-a1b1-110a2f8b9210-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ab8d4887-9a86-4453-a1b1-110a2f8b9210" (UID: "ab8d4887-9a86-4453-a1b1-110a2f8b9210"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 12:51:20 crc kubenswrapper[4685]: I0128 12:51:20.053131 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab8d4887-9a86-4453-a1b1-110a2f8b9210-kube-api-access-q69cl" (OuterVolumeSpecName: "kube-api-access-q69cl") pod "ab8d4887-9a86-4453-a1b1-110a2f8b9210" (UID: "ab8d4887-9a86-4453-a1b1-110a2f8b9210"). InnerVolumeSpecName "kube-api-access-q69cl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:51:20 crc kubenswrapper[4685]: I0128 12:51:20.150269 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q69cl\" (UniqueName: \"kubernetes.io/projected/ab8d4887-9a86-4453-a1b1-110a2f8b9210-kube-api-access-q69cl\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:20 crc kubenswrapper[4685]: I0128 12:51:20.150638 4685 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab8d4887-9a86-4453-a1b1-110a2f8b9210-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:20 crc kubenswrapper[4685]: I0128 12:51:20.771347 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance9468-account-delete-2fvp4" event={"ID":"ab8d4887-9a86-4453-a1b1-110a2f8b9210","Type":"ContainerDied","Data":"0ce754080586d27e2ad5f89b7f5b89d23a15a09ddcf5fbc59ad587addf032adf"}
Jan 28 12:51:20 crc kubenswrapper[4685]: I0128 12:51:20.771388 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0ce754080586d27e2ad5f89b7f5b89d23a15a09ddcf5fbc59ad587addf032adf"
Jan 28 12:51:20 crc kubenswrapper[4685]: I0128 12:51:20.771442 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance9468-account-delete-2fvp4"
Jan 28 12:51:22 crc kubenswrapper[4685]: I0128 12:51:22.558163 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-db-create-jnz4l"]
Jan 28 12:51:22 crc kubenswrapper[4685]: I0128 12:51:22.561487 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-db-create-jnz4l"]
Jan 28 12:51:22 crc kubenswrapper[4685]: I0128 12:51:22.579827 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-9468-account-create-update-lmsmx"]
Jan 28 12:51:22 crc kubenswrapper[4685]: I0128 12:51:22.587526 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance9468-account-delete-2fvp4"]
Jan 28 12:51:22 crc kubenswrapper[4685]: I0128 12:51:22.595657 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-9468-account-create-update-lmsmx"]
Jan 28 12:51:22 crc kubenswrapper[4685]: I0128 12:51:22.603570 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance9468-account-delete-2fvp4"]
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.020739 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-db-create-6qmq8"]
Jan 28 12:51:24 crc kubenswrapper[4685]: E0128 12:51:24.021486 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab8d4887-9a86-4453-a1b1-110a2f8b9210" containerName="mariadb-account-delete"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.021505 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab8d4887-9a86-4453-a1b1-110a2f8b9210" containerName="mariadb-account-delete"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.021684 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab8d4887-9a86-4453-a1b1-110a2f8b9210" containerName="mariadb-account-delete"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.022350 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-6qmq8"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.028031 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-6fc2-account-create-update-4g6qq"]
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.029333 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-6fc2-account-create-update-4g6qq"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.031453 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-db-secret"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.039244 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-create-6qmq8"]
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.059428 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-6fc2-account-create-update-4g6qq"]
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.110757 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hv6ts\" (UniqueName: \"kubernetes.io/projected/5045b0ff-dc70-437b-9b7d-781bb39cc561-kube-api-access-hv6ts\") pod \"glance-6fc2-account-create-update-4g6qq\" (UID: \"5045b0ff-dc70-437b-9b7d-781bb39cc561\") " pod="glance-kuttl-tests/glance-6fc2-account-create-update-4g6qq"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.110858 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvss4\" (UniqueName: \"kubernetes.io/projected/fc9e53de-76a9-4fe0-b514-4769c31942b2-kube-api-access-kvss4\") pod \"glance-db-create-6qmq8\" (UID: \"fc9e53de-76a9-4fe0-b514-4769c31942b2\") " pod="glance-kuttl-tests/glance-db-create-6qmq8"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.110891 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc9e53de-76a9-4fe0-b514-4769c31942b2-operator-scripts\") pod \"glance-db-create-6qmq8\" (UID: \"fc9e53de-76a9-4fe0-b514-4769c31942b2\") " pod="glance-kuttl-tests/glance-db-create-6qmq8"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.110959 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5045b0ff-dc70-437b-9b7d-781bb39cc561-operator-scripts\") pod \"glance-6fc2-account-create-update-4g6qq\" (UID: \"5045b0ff-dc70-437b-9b7d-781bb39cc561\") " pod="glance-kuttl-tests/glance-6fc2-account-create-update-4g6qq"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.212118 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5045b0ff-dc70-437b-9b7d-781bb39cc561-operator-scripts\") pod \"glance-6fc2-account-create-update-4g6qq\" (UID: \"5045b0ff-dc70-437b-9b7d-781bb39cc561\") " pod="glance-kuttl-tests/glance-6fc2-account-create-update-4g6qq"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.212265 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hv6ts\" (UniqueName: \"kubernetes.io/projected/5045b0ff-dc70-437b-9b7d-781bb39cc561-kube-api-access-hv6ts\") pod \"glance-6fc2-account-create-update-4g6qq\" (UID: \"5045b0ff-dc70-437b-9b7d-781bb39cc561\") " pod="glance-kuttl-tests/glance-6fc2-account-create-update-4g6qq"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.212332 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvss4\" (UniqueName: \"kubernetes.io/projected/fc9e53de-76a9-4fe0-b514-4769c31942b2-kube-api-access-kvss4\") pod \"glance-db-create-6qmq8\" (UID: \"fc9e53de-76a9-4fe0-b514-4769c31942b2\") " pod="glance-kuttl-tests/glance-db-create-6qmq8"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.212360 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc9e53de-76a9-4fe0-b514-4769c31942b2-operator-scripts\") pod \"glance-db-create-6qmq8\" (UID: \"fc9e53de-76a9-4fe0-b514-4769c31942b2\") " pod="glance-kuttl-tests/glance-db-create-6qmq8"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.213251 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc9e53de-76a9-4fe0-b514-4769c31942b2-operator-scripts\") pod \"glance-db-create-6qmq8\" (UID: \"fc9e53de-76a9-4fe0-b514-4769c31942b2\") " pod="glance-kuttl-tests/glance-db-create-6qmq8"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.213367 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5045b0ff-dc70-437b-9b7d-781bb39cc561-operator-scripts\") pod \"glance-6fc2-account-create-update-4g6qq\" (UID: \"5045b0ff-dc70-437b-9b7d-781bb39cc561\") " pod="glance-kuttl-tests/glance-6fc2-account-create-update-4g6qq"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.232436 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvss4\" (UniqueName: \"kubernetes.io/projected/fc9e53de-76a9-4fe0-b514-4769c31942b2-kube-api-access-kvss4\") pod \"glance-db-create-6qmq8\" (UID: \"fc9e53de-76a9-4fe0-b514-4769c31942b2\") " pod="glance-kuttl-tests/glance-db-create-6qmq8"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.233102 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hv6ts\" (UniqueName: \"kubernetes.io/projected/5045b0ff-dc70-437b-9b7d-781bb39cc561-kube-api-access-hv6ts\") pod \"glance-6fc2-account-create-update-4g6qq\" (UID: \"5045b0ff-dc70-437b-9b7d-781bb39cc561\") " pod="glance-kuttl-tests/glance-6fc2-account-create-update-4g6qq"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.344939 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-6qmq8"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.352452 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-6fc2-account-create-update-4g6qq"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.546034 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65"
Jan 28 12:51:24 crc kubenswrapper[4685]: E0128 12:51:24.546680 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.566467 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="147e880f-0cd2-4f1d-9468-ed276cf35ead" path="/var/lib/kubelet/pods/147e880f-0cd2-4f1d-9468-ed276cf35ead/volumes"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.567583 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab8d4887-9a86-4453-a1b1-110a2f8b9210" path="/var/lib/kubelet/pods/ab8d4887-9a86-4453-a1b1-110a2f8b9210/volumes"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.568048 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2c4efda-c923-4818-83c5-e44803358cd8" path="/var/lib/kubelet/pods/e2c4efda-c923-4818-83c5-e44803358cd8/volumes"
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.781279 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-6fc2-account-create-update-4g6qq"]
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.803617 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-6fc2-account-create-update-4g6qq" event={"ID":"5045b0ff-dc70-437b-9b7d-781bb39cc561","Type":"ContainerStarted","Data":"1d4fb4934127b6bb13e5ae7df769d751a33b6e4829621066a277ba19aaab98e7"}
Jan 28 12:51:24 crc kubenswrapper[4685]: I0128 12:51:24.839662 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-create-6qmq8"]
Jan 28 12:51:25 crc kubenswrapper[4685]: I0128 12:51:25.813396 4685 generic.go:334] "Generic (PLEG): container finished" podID="5045b0ff-dc70-437b-9b7d-781bb39cc561" containerID="ecb09d2dba97e99a4739a4f646ed9c58d2d9f3c2872d2996a98add67c8cd24b0" exitCode=0
Jan 28 12:51:25 crc kubenswrapper[4685]: I0128 12:51:25.813503 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-6fc2-account-create-update-4g6qq" event={"ID":"5045b0ff-dc70-437b-9b7d-781bb39cc561","Type":"ContainerDied","Data":"ecb09d2dba97e99a4739a4f646ed9c58d2d9f3c2872d2996a98add67c8cd24b0"}
Jan 28 12:51:25 crc kubenswrapper[4685]: I0128 12:51:25.815149 4685 generic.go:334] "Generic (PLEG): container finished" podID="fc9e53de-76a9-4fe0-b514-4769c31942b2" containerID="b71f84ee8acf7ef3aecd638540bb95af6e6943c74eeac4b65a07e35cf75c75cb" exitCode=0
Jan 28 12:51:25 crc kubenswrapper[4685]: I0128 12:51:25.815197 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-6qmq8" event={"ID":"fc9e53de-76a9-4fe0-b514-4769c31942b2","Type":"ContainerDied","Data":"b71f84ee8acf7ef3aecd638540bb95af6e6943c74eeac4b65a07e35cf75c75cb"}
Jan 28 12:51:25 crc kubenswrapper[4685]: I0128 12:51:25.815283 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-6qmq8" event={"ID":"fc9e53de-76a9-4fe0-b514-4769c31942b2","Type":"ContainerStarted","Data":"1be3840fca29dab9211ae21cb8270cf0b8316a0997f3ee314297008d37748387"}
Jan 28 12:51:27 crc kubenswrapper[4685]: I0128 12:51:27.162675 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-6fc2-account-create-update-4g6qq"
Jan 28 12:51:27 crc kubenswrapper[4685]: I0128 12:51:27.169887 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-6qmq8"
Jan 28 12:51:27 crc kubenswrapper[4685]: I0128 12:51:27.264482 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kvss4\" (UniqueName: \"kubernetes.io/projected/fc9e53de-76a9-4fe0-b514-4769c31942b2-kube-api-access-kvss4\") pod \"fc9e53de-76a9-4fe0-b514-4769c31942b2\" (UID: \"fc9e53de-76a9-4fe0-b514-4769c31942b2\") "
Jan 28 12:51:27 crc kubenswrapper[4685]: I0128 12:51:27.265491 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hv6ts\" (UniqueName: \"kubernetes.io/projected/5045b0ff-dc70-437b-9b7d-781bb39cc561-kube-api-access-hv6ts\") pod \"5045b0ff-dc70-437b-9b7d-781bb39cc561\" (UID: \"5045b0ff-dc70-437b-9b7d-781bb39cc561\") "
Jan 28 12:51:27 crc kubenswrapper[4685]: I0128 12:51:27.265528 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5045b0ff-dc70-437b-9b7d-781bb39cc561-operator-scripts\") pod \"5045b0ff-dc70-437b-9b7d-781bb39cc561\" (UID: \"5045b0ff-dc70-437b-9b7d-781bb39cc561\") "
Jan 28 12:51:27 crc kubenswrapper[4685]: I0128 12:51:27.265589 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc9e53de-76a9-4fe0-b514-4769c31942b2-operator-scripts\") pod \"fc9e53de-76a9-4fe0-b514-4769c31942b2\" (UID: \"fc9e53de-76a9-4fe0-b514-4769c31942b2\") "
Jan 28 12:51:27 crc kubenswrapper[4685]: I0128 12:51:27.265979 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc9e53de-76a9-4fe0-b514-4769c31942b2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fc9e53de-76a9-4fe0-b514-4769c31942b2" (UID: "fc9e53de-76a9-4fe0-b514-4769c31942b2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 12:51:27 crc kubenswrapper[4685]: I0128 12:51:27.266098 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5045b0ff-dc70-437b-9b7d-781bb39cc561-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5045b0ff-dc70-437b-9b7d-781bb39cc561" (UID: "5045b0ff-dc70-437b-9b7d-781bb39cc561"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 12:51:27 crc kubenswrapper[4685]: I0128 12:51:27.271018 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5045b0ff-dc70-437b-9b7d-781bb39cc561-kube-api-access-hv6ts" (OuterVolumeSpecName: "kube-api-access-hv6ts") pod "5045b0ff-dc70-437b-9b7d-781bb39cc561" (UID: "5045b0ff-dc70-437b-9b7d-781bb39cc561"). InnerVolumeSpecName "kube-api-access-hv6ts". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:51:27 crc kubenswrapper[4685]: I0128 12:51:27.271565 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc9e53de-76a9-4fe0-b514-4769c31942b2-kube-api-access-kvss4" (OuterVolumeSpecName: "kube-api-access-kvss4") pod "fc9e53de-76a9-4fe0-b514-4769c31942b2" (UID: "fc9e53de-76a9-4fe0-b514-4769c31942b2"). InnerVolumeSpecName "kube-api-access-kvss4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:51:27 crc kubenswrapper[4685]: I0128 12:51:27.366843 4685 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5045b0ff-dc70-437b-9b7d-781bb39cc561-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:27 crc kubenswrapper[4685]: I0128 12:51:27.367293 4685 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc9e53de-76a9-4fe0-b514-4769c31942b2-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:27 crc kubenswrapper[4685]: I0128 12:51:27.367306 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kvss4\" (UniqueName: \"kubernetes.io/projected/fc9e53de-76a9-4fe0-b514-4769c31942b2-kube-api-access-kvss4\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:27 crc kubenswrapper[4685]: I0128 12:51:27.367319 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hv6ts\" (UniqueName: \"kubernetes.io/projected/5045b0ff-dc70-437b-9b7d-781bb39cc561-kube-api-access-hv6ts\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:27 crc kubenswrapper[4685]: I0128 12:51:27.844750 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-6fc2-account-create-update-4g6qq"
Jan 28 12:51:27 crc kubenswrapper[4685]: I0128 12:51:27.844774 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-6fc2-account-create-update-4g6qq" event={"ID":"5045b0ff-dc70-437b-9b7d-781bb39cc561","Type":"ContainerDied","Data":"1d4fb4934127b6bb13e5ae7df769d751a33b6e4829621066a277ba19aaab98e7"}
Jan 28 12:51:27 crc kubenswrapper[4685]: I0128 12:51:27.845130 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1d4fb4934127b6bb13e5ae7df769d751a33b6e4829621066a277ba19aaab98e7"
Jan 28 12:51:27 crc kubenswrapper[4685]: I0128 12:51:27.846434 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-6qmq8" event={"ID":"fc9e53de-76a9-4fe0-b514-4769c31942b2","Type":"ContainerDied","Data":"1be3840fca29dab9211ae21cb8270cf0b8316a0997f3ee314297008d37748387"}
Jan 28 12:51:27 crc kubenswrapper[4685]: I0128 12:51:27.846459 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1be3840fca29dab9211ae21cb8270cf0b8316a0997f3ee314297008d37748387"
Jan 28 12:51:27 crc kubenswrapper[4685]: I0128 12:51:27.846510 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-6qmq8"
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.261507 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-db-sync-b48b4"]
Jan 28 12:51:29 crc kubenswrapper[4685]: E0128 12:51:29.261846 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5045b0ff-dc70-437b-9b7d-781bb39cc561" containerName="mariadb-account-create-update"
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.261862 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="5045b0ff-dc70-437b-9b7d-781bb39cc561" containerName="mariadb-account-create-update"
Jan 28 12:51:29 crc kubenswrapper[4685]: E0128 12:51:29.261872 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc9e53de-76a9-4fe0-b514-4769c31942b2" containerName="mariadb-database-create"
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.261879 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc9e53de-76a9-4fe0-b514-4769c31942b2" containerName="mariadb-database-create"
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.261995 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc9e53de-76a9-4fe0-b514-4769c31942b2" containerName="mariadb-database-create"
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.262016 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="5045b0ff-dc70-437b-9b7d-781bb39cc561" containerName="mariadb-account-create-update"
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.262471 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-b48b4"
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.265359 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-config-data"
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.265818 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-glance-dockercfg-6lkxh"
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.275758 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-sync-b48b4"]
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.298486 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8c1506b6-7536-4e5c-86ad-a8b2565cb5fb-db-sync-config-data\") pod \"glance-db-sync-b48b4\" (UID: \"8c1506b6-7536-4e5c-86ad-a8b2565cb5fb\") " pod="glance-kuttl-tests/glance-db-sync-b48b4"
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.298563 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c1506b6-7536-4e5c-86ad-a8b2565cb5fb-config-data\") pod \"glance-db-sync-b48b4\" (UID: \"8c1506b6-7536-4e5c-86ad-a8b2565cb5fb\") " pod="glance-kuttl-tests/glance-db-sync-b48b4"
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.298588 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwkjj\" (UniqueName: \"kubernetes.io/projected/8c1506b6-7536-4e5c-86ad-a8b2565cb5fb-kube-api-access-mwkjj\") pod \"glance-db-sync-b48b4\" (UID: \"8c1506b6-7536-4e5c-86ad-a8b2565cb5fb\") " pod="glance-kuttl-tests/glance-db-sync-b48b4"
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.400903 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8c1506b6-7536-4e5c-86ad-a8b2565cb5fb-db-sync-config-data\") pod \"glance-db-sync-b48b4\" (UID: \"8c1506b6-7536-4e5c-86ad-a8b2565cb5fb\") " pod="glance-kuttl-tests/glance-db-sync-b48b4"
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.401027 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c1506b6-7536-4e5c-86ad-a8b2565cb5fb-config-data\") pod \"glance-db-sync-b48b4\" (UID: \"8c1506b6-7536-4e5c-86ad-a8b2565cb5fb\") " pod="glance-kuttl-tests/glance-db-sync-b48b4"
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.401053 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwkjj\" (UniqueName: \"kubernetes.io/projected/8c1506b6-7536-4e5c-86ad-a8b2565cb5fb-kube-api-access-mwkjj\") pod \"glance-db-sync-b48b4\" (UID: \"8c1506b6-7536-4e5c-86ad-a8b2565cb5fb\") " pod="glance-kuttl-tests/glance-db-sync-b48b4"
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.406781 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8c1506b6-7536-4e5c-86ad-a8b2565cb5fb-db-sync-config-data\") pod \"glance-db-sync-b48b4\" (UID: \"8c1506b6-7536-4e5c-86ad-a8b2565cb5fb\") " pod="glance-kuttl-tests/glance-db-sync-b48b4"
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.406898 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c1506b6-7536-4e5c-86ad-a8b2565cb5fb-config-data\") pod \"glance-db-sync-b48b4\" (UID: \"8c1506b6-7536-4e5c-86ad-a8b2565cb5fb\") " pod="glance-kuttl-tests/glance-db-sync-b48b4"
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.438036 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwkjj\" (UniqueName: \"kubernetes.io/projected/8c1506b6-7536-4e5c-86ad-a8b2565cb5fb-kube-api-access-mwkjj\") pod \"glance-db-sync-b48b4\" (UID: \"8c1506b6-7536-4e5c-86ad-a8b2565cb5fb\") " pod="glance-kuttl-tests/glance-db-sync-b48b4"
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.578187 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-b48b4"
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.797623 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-sync-b48b4"]
Jan 28 12:51:29 crc kubenswrapper[4685]: W0128 12:51:29.804361 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8c1506b6_7536_4e5c_86ad_a8b2565cb5fb.slice/crio-796bb485222360ef0a3e2e865453a97a786cc633305e6c4feb8f4c95501ed250 WatchSource:0}: Error finding container 796bb485222360ef0a3e2e865453a97a786cc633305e6c4feb8f4c95501ed250: Status 404 returned error can't find the container with id 796bb485222360ef0a3e2e865453a97a786cc633305e6c4feb8f4c95501ed250
Jan 28 12:51:29 crc kubenswrapper[4685]: I0128 12:51:29.863755 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-b48b4" event={"ID":"8c1506b6-7536-4e5c-86ad-a8b2565cb5fb","Type":"ContainerStarted","Data":"796bb485222360ef0a3e2e865453a97a786cc633305e6c4feb8f4c95501ed250"}
Jan 28 12:51:30 crc kubenswrapper[4685]: I0128 12:51:30.871714 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-b48b4" event={"ID":"8c1506b6-7536-4e5c-86ad-a8b2565cb5fb","Type":"ContainerStarted","Data":"3216208e75d8c67b16582c129fd898ea5e91d194e129ec283e6e14696c9b0b82"}
Jan 28 12:51:30 crc kubenswrapper[4685]: I0128 12:51:30.887290 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-db-sync-b48b4" podStartSLOduration=1.887272024 podStartE2EDuration="1.887272024s" podCreationTimestamp="2026-01-28 12:51:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:51:30.88464721 +0000 UTC m=+1841.972061055" watchObservedRunningTime="2026-01-28 12:51:30.887272024 +0000 UTC m=+1841.974685859"
Jan 28 12:51:33 crc kubenswrapper[4685]: I0128 12:51:33.892487 4685 generic.go:334] "Generic (PLEG): container finished" podID="8c1506b6-7536-4e5c-86ad-a8b2565cb5fb" containerID="3216208e75d8c67b16582c129fd898ea5e91d194e129ec283e6e14696c9b0b82" exitCode=0
Jan 28 12:51:33 crc kubenswrapper[4685]: I0128 12:51:33.892552 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-b48b4" event={"ID":"8c1506b6-7536-4e5c-86ad-a8b2565cb5fb","Type":"ContainerDied","Data":"3216208e75d8c67b16582c129fd898ea5e91d194e129ec283e6e14696c9b0b82"}
Jan 28 12:51:35 crc kubenswrapper[4685]: I0128 12:51:35.222694 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-b48b4"
Jan 28 12:51:35 crc kubenswrapper[4685]: I0128 12:51:35.285538 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c1506b6-7536-4e5c-86ad-a8b2565cb5fb-config-data\") pod \"8c1506b6-7536-4e5c-86ad-a8b2565cb5fb\" (UID: \"8c1506b6-7536-4e5c-86ad-a8b2565cb5fb\") "
Jan 28 12:51:35 crc kubenswrapper[4685]: I0128 12:51:35.286042 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mwkjj\" (UniqueName: \"kubernetes.io/projected/8c1506b6-7536-4e5c-86ad-a8b2565cb5fb-kube-api-access-mwkjj\") pod \"8c1506b6-7536-4e5c-86ad-a8b2565cb5fb\" (UID: \"8c1506b6-7536-4e5c-86ad-a8b2565cb5fb\") "
Jan 28 12:51:35 crc kubenswrapper[4685]: I0128 12:51:35.286083 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8c1506b6-7536-4e5c-86ad-a8b2565cb5fb-db-sync-config-data\") pod \"8c1506b6-7536-4e5c-86ad-a8b2565cb5fb\" (UID: \"8c1506b6-7536-4e5c-86ad-a8b2565cb5fb\") "
Jan 28 12:51:35 crc kubenswrapper[4685]: I0128 12:51:35.291013 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c1506b6-7536-4e5c-86ad-a8b2565cb5fb-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "8c1506b6-7536-4e5c-86ad-a8b2565cb5fb" (UID: "8c1506b6-7536-4e5c-86ad-a8b2565cb5fb"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 12:51:35 crc kubenswrapper[4685]: I0128 12:51:35.291153 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c1506b6-7536-4e5c-86ad-a8b2565cb5fb-kube-api-access-mwkjj" (OuterVolumeSpecName: "kube-api-access-mwkjj") pod "8c1506b6-7536-4e5c-86ad-a8b2565cb5fb" (UID: "8c1506b6-7536-4e5c-86ad-a8b2565cb5fb"). InnerVolumeSpecName "kube-api-access-mwkjj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:51:35 crc kubenswrapper[4685]: I0128 12:51:35.319165 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c1506b6-7536-4e5c-86ad-a8b2565cb5fb-config-data" (OuterVolumeSpecName: "config-data") pod "8c1506b6-7536-4e5c-86ad-a8b2565cb5fb" (UID: "8c1506b6-7536-4e5c-86ad-a8b2565cb5fb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 12:51:35 crc kubenswrapper[4685]: I0128 12:51:35.387286 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c1506b6-7536-4e5c-86ad-a8b2565cb5fb-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:35 crc kubenswrapper[4685]: I0128 12:51:35.387314 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mwkjj\" (UniqueName: \"kubernetes.io/projected/8c1506b6-7536-4e5c-86ad-a8b2565cb5fb-kube-api-access-mwkjj\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:35 crc kubenswrapper[4685]: I0128 12:51:35.387324 4685 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8c1506b6-7536-4e5c-86ad-a8b2565cb5fb-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 12:51:35 crc kubenswrapper[4685]: I0128 12:51:35.910377 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-b48b4" event={"ID":"8c1506b6-7536-4e5c-86ad-a8b2565cb5fb","Type":"ContainerDied","Data":"796bb485222360ef0a3e2e865453a97a786cc633305e6c4feb8f4c95501ed250"}
Jan 28 12:51:35 crc kubenswrapper[4685]: I0128 12:51:35.910416 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="796bb485222360ef0a3e2e865453a97a786cc633305e6c4feb8f4c95501ed250"
Jan 28 12:51:35 crc kubenswrapper[4685]: I0128 12:51:35.910474 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-b48b4"
Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.086887 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-single-0"]
Jan 28 12:51:37 crc kubenswrapper[4685]: E0128 12:51:37.087490 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c1506b6-7536-4e5c-86ad-a8b2565cb5fb" containerName="glance-db-sync"
Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.087503 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c1506b6-7536-4e5c-86ad-a8b2565cb5fb" containerName="glance-db-sync"
Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.087650 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c1506b6-7536-4e5c-86ad-a8b2565cb5fb" containerName="glance-db-sync"
Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.088497 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0"
Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.092033 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-scripts"
Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.093228 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-default-single-config-data"
Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.093797 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-glance-dockercfg-6lkxh"
Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.102263 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"]
Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.212732 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6e537d2-0469-4972-a384-5785f9e21c95-config-data\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0"
Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.212783 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-etc-nvme\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0"
Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.212805 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-run\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0"
Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.212826 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6e537d2-0469-4972-a384-5785f9e21c95-logs\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0"
Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.212843 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e6e537d2-0469-4972-a384-5785f9e21c95-httpd-run\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0"
Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.212872 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-lib-modules\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0"
Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.212968 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0"
Jan 28 12:51:37 crc
kubenswrapper[4685]: I0128 12:51:37.213019 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-dev\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.213039 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-etc-iscsi\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.213066 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6e537d2-0469-4972-a384-5785f9e21c95-scripts\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.213093 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4q6b9\" (UniqueName: \"kubernetes.io/projected/e6e537d2-0469-4972-a384-5785f9e21c95-kube-api-access-4q6b9\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.213126 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-var-locks-brick\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.213151 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.213425 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-sys\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.314280 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-dev\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.314327 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-etc-iscsi\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 
12:51:37.314356 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6e537d2-0469-4972-a384-5785f9e21c95-scripts\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.314385 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4q6b9\" (UniqueName: \"kubernetes.io/projected/e6e537d2-0469-4972-a384-5785f9e21c95-kube-api-access-4q6b9\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.314388 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-dev\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.314411 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-var-locks-brick\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.314472 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-etc-iscsi\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.314572 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.314599 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-var-locks-brick\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.314623 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-sys\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.314685 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6e537d2-0469-4972-a384-5785f9e21c95-config-data\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.314716 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: 
\"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-etc-nvme\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.314734 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-sys\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.314739 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-run\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.314771 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-run\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.314827 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6e537d2-0469-4972-a384-5785f9e21c95-logs\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.314853 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e6e537d2-0469-4972-a384-5785f9e21c95-httpd-run\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.314860 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") device mount path \"/mnt/openstack/pv12\"" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.314891 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-lib-modules\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.314925 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.314927 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-etc-nvme\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 
28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.315014 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-lib-modules\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.315050 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") device mount path \"/mnt/openstack/pv04\"" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.315323 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6e537d2-0469-4972-a384-5785f9e21c95-logs\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.315623 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e6e537d2-0469-4972-a384-5785f9e21c95-httpd-run\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.321496 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6e537d2-0469-4972-a384-5785f9e21c95-scripts\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.321932 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6e537d2-0469-4972-a384-5785f9e21c95-config-data\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.338797 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.342361 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.349597 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4q6b9\" (UniqueName: \"kubernetes.io/projected/e6e537d2-0469-4972-a384-5785f9e21c95-kube-api-access-4q6b9\") pod \"glance-default-single-0\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.405969 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.831321 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:51:37 crc kubenswrapper[4685]: I0128 12:51:37.926675 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"e6e537d2-0469-4972-a384-5785f9e21c95","Type":"ContainerStarted","Data":"9f54a9dde8d16c2248f2887ddd582c88ad0b3496c8cf156bb5798f402bf6d4d1"} Jan 28 12:51:38 crc kubenswrapper[4685]: I0128 12:51:38.545581 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:51:38 crc kubenswrapper[4685]: E0128 12:51:38.546411 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:51:38 crc kubenswrapper[4685]: I0128 12:51:38.936302 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"e6e537d2-0469-4972-a384-5785f9e21c95","Type":"ContainerStarted","Data":"20982af0012b4a2581c1092f2ea7915acd6480e8fb0854b3a7a48f0d9e3a8a09"} Jan 28 12:51:38 crc kubenswrapper[4685]: I0128 12:51:38.936677 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"e6e537d2-0469-4972-a384-5785f9e21c95","Type":"ContainerStarted","Data":"f4763cbcfe2148f39a1b1b5af88637ded71ff38f49f235574f36b900b19ee92a"} Jan 28 12:51:38 crc kubenswrapper[4685]: I0128 12:51:38.959105 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-single-0" podStartSLOduration=1.959086604 podStartE2EDuration="1.959086604s" podCreationTimestamp="2026-01-28 12:51:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:51:38.957284393 +0000 UTC m=+1850.044698238" watchObservedRunningTime="2026-01-28 12:51:38.959086604 +0000 UTC m=+1850.046500449" Jan 28 12:51:47 crc kubenswrapper[4685]: I0128 12:51:47.407113 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:47 crc kubenswrapper[4685]: I0128 12:51:47.407633 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:47 crc kubenswrapper[4685]: I0128 12:51:47.433940 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:47 crc kubenswrapper[4685]: I0128 12:51:47.446524 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:48 crc kubenswrapper[4685]: I0128 12:51:48.014073 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:48 crc kubenswrapper[4685]: I0128 12:51:48.014129 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-0" 
Jan 28 12:51:49 crc kubenswrapper[4685]: I0128 12:51:49.984066 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:50 crc kubenswrapper[4685]: I0128 12:51:50.048548 4685 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:51:50 crc kubenswrapper[4685]: I0128 12:51:50.063579 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.193567 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-single-1"] Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.203312 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.204534 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-single-2"] Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.205877 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.211775 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-1"] Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.218873 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-2"] Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.261645 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-etc-iscsi\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.261693 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-dev\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.261714 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-run\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.261735 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-run\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.261863 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49344d3b-6a61-405f-842a-b5f70383b376-config-data\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 
12:51:53.261909 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.261955 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-etc-iscsi\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.262040 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.262324 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-var-locks-brick\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.262353 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-etc-nvme\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.262422 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-dev\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.262472 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-var-locks-brick\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.262498 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-sys\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.262512 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49344d3b-6a61-405f-842a-b5f70383b376-logs\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.262537 4685 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/49344d3b-6a61-405f-842a-b5f70383b376-httpd-run\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.262595 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75zjc\" (UniqueName: \"kubernetes.io/projected/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-kube-api-access-75zjc\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.262614 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-lib-modules\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.262657 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-scripts\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.262700 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.262740 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-httpd-run\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.262772 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-etc-nvme\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.262827 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-lib-modules\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.262850 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-logs\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 
12:51:53.262901 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-config-data\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.262929 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7zwd\" (UniqueName: \"kubernetes.io/projected/49344d3b-6a61-405f-842a-b5f70383b376-kube-api-access-j7zwd\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.263075 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-sys\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.263134 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage13-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.263150 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49344d3b-6a61-405f-842a-b5f70383b376-scripts\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.365659 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-sys\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.365756 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-sys\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.365758 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage13-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.365808 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49344d3b-6a61-405f-842a-b5f70383b376-scripts\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.365827 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" 
(UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-etc-iscsi\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.365850 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-dev\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.365867 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-run\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.365884 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-run\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.365904 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49344d3b-6a61-405f-842a-b5f70383b376-config-data\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.365915 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-etc-iscsi\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.365923 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366006 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-run\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.365951 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-run\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.365921 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-dev\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 
12:51:53.366125 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-etc-iscsi\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366139 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") device mount path \"/mnt/openstack/pv02\"" pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366158 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366232 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-var-locks-brick\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366242 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-etc-iscsi\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366252 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-etc-nvme\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366294 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-etc-nvme\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366329 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-var-locks-brick\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366368 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-dev\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366377 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: 
\"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-dev\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366419 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-var-locks-brick\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366467 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") device mount path \"/mnt/openstack/pv17\"" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366540 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-sys\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366491 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-sys\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366606 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49344d3b-6a61-405f-842a-b5f70383b376-logs\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366481 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-var-locks-brick\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366664 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/49344d3b-6a61-405f-842a-b5f70383b376-httpd-run\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366729 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75zjc\" (UniqueName: \"kubernetes.io/projected/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-kube-api-access-75zjc\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366772 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-lib-modules\") pod \"glance-default-single-1\" (UID: 
\"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366818 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-scripts\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366861 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366892 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-lib-modules\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366904 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-httpd-run\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.366959 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-etc-nvme\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.367011 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-logs\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.367056 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-lib-modules\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.367106 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-config-data\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.367161 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zwd\" (UniqueName: \"kubernetes.io/projected/49344d3b-6a61-405f-842a-b5f70383b376-kube-api-access-j7zwd\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.367530 4685 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-httpd-run\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.367543 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-logs\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.367587 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-etc-nvme\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.367123 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") device mount path \"/mnt/openstack/pv20\"" pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.367817 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-lib-modules\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.368045 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage13-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") device mount path \"/mnt/openstack/pv13\"" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.370962 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49344d3b-6a61-405f-842a-b5f70383b376-logs\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.372100 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/49344d3b-6a61-405f-842a-b5f70383b376-httpd-run\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.372441 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49344d3b-6a61-405f-842a-b5f70383b376-config-data\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.372758 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-config-data\") pod 
\"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.373634 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-scripts\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.387546 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49344d3b-6a61-405f-842a-b5f70383b376-scripts\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.393061 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7zwd\" (UniqueName: \"kubernetes.io/projected/49344d3b-6a61-405f-842a-b5f70383b376-kube-api-access-j7zwd\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.398405 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.398574 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.402768 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75zjc\" (UniqueName: \"kubernetes.io/projected/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-kube-api-access-75zjc\") pod \"glance-default-single-2\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.408549 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.410451 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage13-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") pod \"glance-default-single-1\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.524240 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.540863 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.545387 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:51:53 crc kubenswrapper[4685]: E0128 12:51:53.545654 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:51:53 crc kubenswrapper[4685]: I0128 12:51:53.974744 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-1"] Jan 28 12:51:53 crc kubenswrapper[4685]: W0128 12:51:53.977574 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49344d3b_6a61_405f_842a_b5f70383b376.slice/crio-e771415d64ea29bd66d1bfd3114aae8e814328a0f91aa67a4647e7237ff05dd9 WatchSource:0}: Error finding container e771415d64ea29bd66d1bfd3114aae8e814328a0f91aa67a4647e7237ff05dd9: Status 404 returned error can't find the container with id e771415d64ea29bd66d1bfd3114aae8e814328a0f91aa67a4647e7237ff05dd9 Jan 28 12:51:54 crc kubenswrapper[4685]: I0128 12:51:54.028275 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-single-2"] Jan 28 12:51:54 crc kubenswrapper[4685]: W0128 12:51:54.039842 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd7a9fc1e_083a_4e7e_889f_252cb21cd8fe.slice/crio-ea0e8116be91be8fd92c25983344b664e46e424a3746ceb2b569632d01bc19e9 WatchSource:0}: Error finding container ea0e8116be91be8fd92c25983344b664e46e424a3746ceb2b569632d01bc19e9: Status 404 returned error can't find the container with id ea0e8116be91be8fd92c25983344b664e46e424a3746ceb2b569632d01bc19e9 Jan 28 12:51:54 crc kubenswrapper[4685]: I0128 12:51:54.082297 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-2" event={"ID":"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe","Type":"ContainerStarted","Data":"ea0e8116be91be8fd92c25983344b664e46e424a3746ceb2b569632d01bc19e9"} Jan 28 12:51:54 crc kubenswrapper[4685]: I0128 12:51:54.083722 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-1" event={"ID":"49344d3b-6a61-405f-842a-b5f70383b376","Type":"ContainerStarted","Data":"e771415d64ea29bd66d1bfd3114aae8e814328a0f91aa67a4647e7237ff05dd9"} Jan 28 12:51:55 crc kubenswrapper[4685]: I0128 12:51:55.092713 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-2" event={"ID":"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe","Type":"ContainerStarted","Data":"06e9c96ad56e9ccbbff75a9ba37df97b704b177b9c8bd4e406ae80fb7c122d11"} Jan 28 12:51:55 crc kubenswrapper[4685]: I0128 12:51:55.094000 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-2" event={"ID":"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe","Type":"ContainerStarted","Data":"093555f943373e05a1360372c82e77728fae54ec595e080b29d6dab195ad47cc"} Jan 28 12:51:55 crc kubenswrapper[4685]: I0128 12:51:55.096748 4685 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="glance-kuttl-tests/glance-default-single-1" event={"ID":"49344d3b-6a61-405f-842a-b5f70383b376","Type":"ContainerStarted","Data":"4d2f006e494024560073e69902e661e223c93ca9bffd6f362d54553907378e27"} Jan 28 12:51:55 crc kubenswrapper[4685]: I0128 12:51:55.096836 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-1" event={"ID":"49344d3b-6a61-405f-842a-b5f70383b376","Type":"ContainerStarted","Data":"4230dcbcd1eeb7b4adcb421ec10f05eeeb80e43a53b50681dd0f092a2e418583"} Jan 28 12:51:55 crc kubenswrapper[4685]: I0128 12:51:55.150697 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-single-2" podStartSLOduration=3.15067356 podStartE2EDuration="3.15067356s" podCreationTimestamp="2026-01-28 12:51:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:51:55.12555061 +0000 UTC m=+1866.212964445" watchObservedRunningTime="2026-01-28 12:51:55.15067356 +0000 UTC m=+1866.238087395" Jan 28 12:51:55 crc kubenswrapper[4685]: I0128 12:51:55.152117 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-single-1" podStartSLOduration=3.152106591 podStartE2EDuration="3.152106591s" podCreationTimestamp="2026-01-28 12:51:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:51:55.146996326 +0000 UTC m=+1866.234410151" watchObservedRunningTime="2026-01-28 12:51:55.152106591 +0000 UTC m=+1866.239520426" Jan 28 12:52:03 crc kubenswrapper[4685]: I0128 12:52:03.525422 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:52:03 crc kubenswrapper[4685]: I0128 12:52:03.526031 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:52:03 crc kubenswrapper[4685]: I0128 12:52:03.541384 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:52:03 crc kubenswrapper[4685]: I0128 12:52:03.541444 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:52:03 crc kubenswrapper[4685]: I0128 12:52:03.549593 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:52:03 crc kubenswrapper[4685]: I0128 12:52:03.567545 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:52:03 crc kubenswrapper[4685]: I0128 12:52:03.587544 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:52:03 crc kubenswrapper[4685]: I0128 12:52:03.593896 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:52:04 crc kubenswrapper[4685]: I0128 12:52:04.167945 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:52:04 crc kubenswrapper[4685]: I0128 12:52:04.168024 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:52:04 crc 
kubenswrapper[4685]: I0128 12:52:04.168052 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:52:04 crc kubenswrapper[4685]: I0128 12:52:04.168075 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:52:04 crc kubenswrapper[4685]: I0128 12:52:04.550436 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:52:04 crc kubenswrapper[4685]: E0128 12:52:04.550824 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:52:06 crc kubenswrapper[4685]: I0128 12:52:06.166699 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:52:06 crc kubenswrapper[4685]: I0128 12:52:06.181268 4685 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:52:06 crc kubenswrapper[4685]: I0128 12:52:06.198466 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:52:06 crc kubenswrapper[4685]: I0128 12:52:06.211653 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:52:06 crc kubenswrapper[4685]: I0128 12:52:06.211786 4685 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:52:06 crc kubenswrapper[4685]: I0128 12:52:06.374829 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:52:07 crc kubenswrapper[4685]: I0128 12:52:07.616993 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-2"] Jan 28 12:52:07 crc kubenswrapper[4685]: I0128 12:52:07.625653 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-1"] Jan 28 12:52:08 crc kubenswrapper[4685]: I0128 12:52:08.199104 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-2" podUID="d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" containerName="glance-log" containerID="cri-o://093555f943373e05a1360372c82e77728fae54ec595e080b29d6dab195ad47cc" gracePeriod=30 Jan 28 12:52:08 crc kubenswrapper[4685]: I0128 12:52:08.199200 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-2" podUID="d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" containerName="glance-httpd" containerID="cri-o://06e9c96ad56e9ccbbff75a9ba37df97b704b177b9c8bd4e406ae80fb7c122d11" gracePeriod=30 Jan 28 12:52:08 crc kubenswrapper[4685]: I0128 12:52:08.199245 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-1" podUID="49344d3b-6a61-405f-842a-b5f70383b376" containerName="glance-log" containerID="cri-o://4230dcbcd1eeb7b4adcb421ec10f05eeeb80e43a53b50681dd0f092a2e418583" gracePeriod=30 Jan 28 12:52:08 crc kubenswrapper[4685]: I0128 12:52:08.199283 4685 
kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-1" podUID="49344d3b-6a61-405f-842a-b5f70383b376" containerName="glance-httpd" containerID="cri-o://4d2f006e494024560073e69902e661e223c93ca9bffd6f362d54553907378e27" gracePeriod=30 Jan 28 12:52:08 crc kubenswrapper[4685]: I0128 12:52:08.209850 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="glance-kuttl-tests/glance-default-single-2" podUID="d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" containerName="glance-httpd" probeResult="failure" output="Get \"http://10.217.0.137:9292/healthcheck\": EOF" Jan 28 12:52:08 crc kubenswrapper[4685]: I0128 12:52:08.209915 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="glance-kuttl-tests/glance-default-single-1" podUID="49344d3b-6a61-405f-842a-b5f70383b376" containerName="glance-log" probeResult="failure" output="Get \"http://10.217.0.136:9292/healthcheck\": EOF" Jan 28 12:52:08 crc kubenswrapper[4685]: I0128 12:52:08.209932 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="glance-kuttl-tests/glance-default-single-1" podUID="49344d3b-6a61-405f-842a-b5f70383b376" containerName="glance-httpd" probeResult="failure" output="Get \"http://10.217.0.136:9292/healthcheck\": EOF" Jan 28 12:52:09 crc kubenswrapper[4685]: I0128 12:52:09.209627 4685 generic.go:334] "Generic (PLEG): container finished" podID="49344d3b-6a61-405f-842a-b5f70383b376" containerID="4230dcbcd1eeb7b4adcb421ec10f05eeeb80e43a53b50681dd0f092a2e418583" exitCode=143 Jan 28 12:52:09 crc kubenswrapper[4685]: I0128 12:52:09.209707 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-1" event={"ID":"49344d3b-6a61-405f-842a-b5f70383b376","Type":"ContainerDied","Data":"4230dcbcd1eeb7b4adcb421ec10f05eeeb80e43a53b50681dd0f092a2e418583"} Jan 28 12:52:09 crc kubenswrapper[4685]: I0128 12:52:09.213788 4685 generic.go:334] "Generic (PLEG): container finished" podID="d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" containerID="093555f943373e05a1360372c82e77728fae54ec595e080b29d6dab195ad47cc" exitCode=143 Jan 28 12:52:09 crc kubenswrapper[4685]: I0128 12:52:09.213847 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-2" event={"ID":"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe","Type":"ContainerDied","Data":"093555f943373e05a1360372c82e77728fae54ec595e080b29d6dab195ad47cc"} Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.702027 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.884692 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-dev\") pod \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.884802 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-75zjc\" (UniqueName: \"kubernetes.io/projected/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-kube-api-access-75zjc\") pod \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.884819 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-dev" (OuterVolumeSpecName: "dev") pod "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" (UID: "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.884882 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" (UID: "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.884824 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-etc-iscsi\") pod \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.884993 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") pod \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.885094 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-var-locks-brick\") pod \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.885140 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-config-data\") pod \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.885196 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-lib-modules\") pod \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.885202 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-var-locks-brick" 
(OuterVolumeSpecName: "var-locks-brick") pod "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" (UID: "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.885224 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-etc-nvme\") pod \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.885259 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" (UID: "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.885279 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-sys\") pod \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.885314 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-scripts\") pod \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.885355 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-sys" (OuterVolumeSpecName: "sys") pod "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" (UID: "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.885400 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-logs\") pod \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.885452 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.885508 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-run\") pod \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.885420 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" (UID: "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe"). InnerVolumeSpecName "etc-nvme". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.885559 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-httpd-run\") pod \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\" (UID: \"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe\") " Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.885610 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-run" (OuterVolumeSpecName: "run") pod "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" (UID: "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.885917 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" (UID: "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.886002 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-logs" (OuterVolumeSpecName: "logs") pod "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" (UID: "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.886567 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.886640 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.886676 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.886690 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.886700 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.886711 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.886723 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.886754 4685 reconciler_common.go:293] "Volume detached for 
volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.886767 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.891337 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage20-crc" (OuterVolumeSpecName: "glance-cache") pod "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" (UID: "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe"). InnerVolumeSpecName "local-storage20-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.891355 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" (UID: "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.891703 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-kube-api-access-75zjc" (OuterVolumeSpecName: "kube-api-access-75zjc") pod "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" (UID: "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe"). InnerVolumeSpecName "kube-api-access-75zjc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.891703 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-scripts" (OuterVolumeSpecName: "scripts") pod "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" (UID: "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.931781 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-config-data" (OuterVolumeSpecName: "config-data") pod "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" (UID: "d7a9fc1e-083a-4e7e-889f-252cb21cd8fe"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.954024 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.988024 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-75zjc\" (UniqueName: \"kubernetes.io/projected/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-kube-api-access-75zjc\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.988090 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") on node \"crc\" " Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.988105 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.988116 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:11 crc kubenswrapper[4685]: I0128 12:52:11.988135 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.009195 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.010576 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage20-crc" (UniqueName: "kubernetes.io/local-volume/local-storage20-crc") on node "crc" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.089351 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-dev\") pod \"49344d3b-6a61-405f-842a-b5f70383b376\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.089406 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49344d3b-6a61-405f-842a-b5f70383b376-config-data\") pod \"49344d3b-6a61-405f-842a-b5f70383b376\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.089431 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/49344d3b-6a61-405f-842a-b5f70383b376-httpd-run\") pod \"49344d3b-6a61-405f-842a-b5f70383b376\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.089441 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-dev" (OuterVolumeSpecName: "dev") pod "49344d3b-6a61-405f-842a-b5f70383b376" (UID: "49344d3b-6a61-405f-842a-b5f70383b376"). InnerVolumeSpecName "dev". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.089465 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49344d3b-6a61-405f-842a-b5f70383b376-logs\") pod \"49344d3b-6a61-405f-842a-b5f70383b376\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.089484 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j7zwd\" (UniqueName: \"kubernetes.io/projected/49344d3b-6a61-405f-842a-b5f70383b376-kube-api-access-j7zwd\") pod \"49344d3b-6a61-405f-842a-b5f70383b376\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.089511 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-etc-iscsi\") pod \"49344d3b-6a61-405f-842a-b5f70383b376\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.089534 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-lib-modules\") pod \"49344d3b-6a61-405f-842a-b5f70383b376\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.089551 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") pod \"49344d3b-6a61-405f-842a-b5f70383b376\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.089590 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49344d3b-6a61-405f-842a-b5f70383b376-scripts\") pod \"49344d3b-6a61-405f-842a-b5f70383b376\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.089607 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"49344d3b-6a61-405f-842a-b5f70383b376\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.089633 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-run\") pod \"49344d3b-6a61-405f-842a-b5f70383b376\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.089647 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-var-locks-brick\") pod \"49344d3b-6a61-405f-842a-b5f70383b376\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.089669 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-etc-nvme\") pod \"49344d3b-6a61-405f-842a-b5f70383b376\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.089687 4685 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-sys\") pod \"49344d3b-6a61-405f-842a-b5f70383b376\" (UID: \"49344d3b-6a61-405f-842a-b5f70383b376\") " Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.089695 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49344d3b-6a61-405f-842a-b5f70383b376-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "49344d3b-6a61-405f-842a-b5f70383b376" (UID: "49344d3b-6a61-405f-842a-b5f70383b376"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.089978 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.089992 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/49344d3b-6a61-405f-842a-b5f70383b376-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.090001 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage20-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage20-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.090010 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.090015 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-run" (OuterVolumeSpecName: "run") pod "49344d3b-6a61-405f-842a-b5f70383b376" (UID: "49344d3b-6a61-405f-842a-b5f70383b376"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.090067 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-sys" (OuterVolumeSpecName: "sys") pod "49344d3b-6a61-405f-842a-b5f70383b376" (UID: "49344d3b-6a61-405f-842a-b5f70383b376"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.090070 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "49344d3b-6a61-405f-842a-b5f70383b376" (UID: "49344d3b-6a61-405f-842a-b5f70383b376"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.090098 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "49344d3b-6a61-405f-842a-b5f70383b376" (UID: "49344d3b-6a61-405f-842a-b5f70383b376"). InnerVolumeSpecName "var-locks-brick". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.090103 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "49344d3b-6a61-405f-842a-b5f70383b376" (UID: "49344d3b-6a61-405f-842a-b5f70383b376"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.090120 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "49344d3b-6a61-405f-842a-b5f70383b376" (UID: "49344d3b-6a61-405f-842a-b5f70383b376"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.090464 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49344d3b-6a61-405f-842a-b5f70383b376-logs" (OuterVolumeSpecName: "logs") pod "49344d3b-6a61-405f-842a-b5f70383b376" (UID: "49344d3b-6a61-405f-842a-b5f70383b376"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.092272 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49344d3b-6a61-405f-842a-b5f70383b376-scripts" (OuterVolumeSpecName: "scripts") pod "49344d3b-6a61-405f-842a-b5f70383b376" (UID: "49344d3b-6a61-405f-842a-b5f70383b376"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.092297 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage17-crc" (OuterVolumeSpecName: "glance") pod "49344d3b-6a61-405f-842a-b5f70383b376" (UID: "49344d3b-6a61-405f-842a-b5f70383b376"). InnerVolumeSpecName "local-storage17-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.093226 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage13-crc" (OuterVolumeSpecName: "glance-cache") pod "49344d3b-6a61-405f-842a-b5f70383b376" (UID: "49344d3b-6a61-405f-842a-b5f70383b376"). InnerVolumeSpecName "local-storage13-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.093274 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49344d3b-6a61-405f-842a-b5f70383b376-kube-api-access-j7zwd" (OuterVolumeSpecName: "kube-api-access-j7zwd") pod "49344d3b-6a61-405f-842a-b5f70383b376" (UID: "49344d3b-6a61-405f-842a-b5f70383b376"). InnerVolumeSpecName "kube-api-access-j7zwd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.119518 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49344d3b-6a61-405f-842a-b5f70383b376-config-data" (OuterVolumeSpecName: "config-data") pod "49344d3b-6a61-405f-842a-b5f70383b376" (UID: "49344d3b-6a61-405f-842a-b5f70383b376"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.191752 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49344d3b-6a61-405f-842a-b5f70383b376-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.191804 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j7zwd\" (UniqueName: \"kubernetes.io/projected/49344d3b-6a61-405f-842a-b5f70383b376-kube-api-access-j7zwd\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.191825 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.191843 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.191901 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage13-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") on node \"crc\" " Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.191919 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49344d3b-6a61-405f-842a-b5f70383b376-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.191940 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") on node \"crc\" " Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.191957 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.191975 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.191990 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.192005 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/49344d3b-6a61-405f-842a-b5f70383b376-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.192019 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49344d3b-6a61-405f-842a-b5f70383b376-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.204376 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage17-crc" (UniqueName: "kubernetes.io/local-volume/local-storage17-crc") on node "crc" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.204875 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage13-crc" (UniqueName: 
"kubernetes.io/local-volume/local-storage13-crc") on node "crc" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.236676 4685 generic.go:334] "Generic (PLEG): container finished" podID="d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" containerID="06e9c96ad56e9ccbbff75a9ba37df97b704b177b9c8bd4e406ae80fb7c122d11" exitCode=0 Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.236755 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-2" event={"ID":"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe","Type":"ContainerDied","Data":"06e9c96ad56e9ccbbff75a9ba37df97b704b177b9c8bd4e406ae80fb7c122d11"} Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.236783 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-2" event={"ID":"d7a9fc1e-083a-4e7e-889f-252cb21cd8fe","Type":"ContainerDied","Data":"ea0e8116be91be8fd92c25983344b664e46e424a3746ceb2b569632d01bc19e9"} Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.236804 4685 scope.go:117] "RemoveContainer" containerID="06e9c96ad56e9ccbbff75a9ba37df97b704b177b9c8bd4e406ae80fb7c122d11" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.236942 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-2" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.239401 4685 generic.go:334] "Generic (PLEG): container finished" podID="49344d3b-6a61-405f-842a-b5f70383b376" containerID="4d2f006e494024560073e69902e661e223c93ca9bffd6f362d54553907378e27" exitCode=0 Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.239433 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-1" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.239451 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-1" event={"ID":"49344d3b-6a61-405f-842a-b5f70383b376","Type":"ContainerDied","Data":"4d2f006e494024560073e69902e661e223c93ca9bffd6f362d54553907378e27"} Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.239490 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-1" event={"ID":"49344d3b-6a61-405f-842a-b5f70383b376","Type":"ContainerDied","Data":"e771415d64ea29bd66d1bfd3114aae8e814328a0f91aa67a4647e7237ff05dd9"} Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.262886 4685 scope.go:117] "RemoveContainer" containerID="093555f943373e05a1360372c82e77728fae54ec595e080b29d6dab195ad47cc" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.272062 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-2"] Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.282377 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-single-2"] Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.292842 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage13-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.292894 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.294270 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["glance-kuttl-tests/glance-default-single-1"] Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.296584 4685 scope.go:117] "RemoveContainer" containerID="06e9c96ad56e9ccbbff75a9ba37df97b704b177b9c8bd4e406ae80fb7c122d11" Jan 28 12:52:12 crc kubenswrapper[4685]: E0128 12:52:12.296944 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06e9c96ad56e9ccbbff75a9ba37df97b704b177b9c8bd4e406ae80fb7c122d11\": container with ID starting with 06e9c96ad56e9ccbbff75a9ba37df97b704b177b9c8bd4e406ae80fb7c122d11 not found: ID does not exist" containerID="06e9c96ad56e9ccbbff75a9ba37df97b704b177b9c8bd4e406ae80fb7c122d11" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.296980 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06e9c96ad56e9ccbbff75a9ba37df97b704b177b9c8bd4e406ae80fb7c122d11"} err="failed to get container status \"06e9c96ad56e9ccbbff75a9ba37df97b704b177b9c8bd4e406ae80fb7c122d11\": rpc error: code = NotFound desc = could not find container \"06e9c96ad56e9ccbbff75a9ba37df97b704b177b9c8bd4e406ae80fb7c122d11\": container with ID starting with 06e9c96ad56e9ccbbff75a9ba37df97b704b177b9c8bd4e406ae80fb7c122d11 not found: ID does not exist" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.297007 4685 scope.go:117] "RemoveContainer" containerID="093555f943373e05a1360372c82e77728fae54ec595e080b29d6dab195ad47cc" Jan 28 12:52:12 crc kubenswrapper[4685]: E0128 12:52:12.297360 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"093555f943373e05a1360372c82e77728fae54ec595e080b29d6dab195ad47cc\": container with ID starting with 093555f943373e05a1360372c82e77728fae54ec595e080b29d6dab195ad47cc not found: ID does not exist" containerID="093555f943373e05a1360372c82e77728fae54ec595e080b29d6dab195ad47cc" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.297419 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"093555f943373e05a1360372c82e77728fae54ec595e080b29d6dab195ad47cc"} err="failed to get container status \"093555f943373e05a1360372c82e77728fae54ec595e080b29d6dab195ad47cc\": rpc error: code = NotFound desc = could not find container \"093555f943373e05a1360372c82e77728fae54ec595e080b29d6dab195ad47cc\": container with ID starting with 093555f943373e05a1360372c82e77728fae54ec595e080b29d6dab195ad47cc not found: ID does not exist" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.297447 4685 scope.go:117] "RemoveContainer" containerID="4d2f006e494024560073e69902e661e223c93ca9bffd6f362d54553907378e27" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.299757 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-single-1"] Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.313994 4685 scope.go:117] "RemoveContainer" containerID="4230dcbcd1eeb7b4adcb421ec10f05eeeb80e43a53b50681dd0f092a2e418583" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.333269 4685 scope.go:117] "RemoveContainer" containerID="4d2f006e494024560073e69902e661e223c93ca9bffd6f362d54553907378e27" Jan 28 12:52:12 crc kubenswrapper[4685]: E0128 12:52:12.333759 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d2f006e494024560073e69902e661e223c93ca9bffd6f362d54553907378e27\": container with ID starting with 
4d2f006e494024560073e69902e661e223c93ca9bffd6f362d54553907378e27 not found: ID does not exist" containerID="4d2f006e494024560073e69902e661e223c93ca9bffd6f362d54553907378e27" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.333807 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d2f006e494024560073e69902e661e223c93ca9bffd6f362d54553907378e27"} err="failed to get container status \"4d2f006e494024560073e69902e661e223c93ca9bffd6f362d54553907378e27\": rpc error: code = NotFound desc = could not find container \"4d2f006e494024560073e69902e661e223c93ca9bffd6f362d54553907378e27\": container with ID starting with 4d2f006e494024560073e69902e661e223c93ca9bffd6f362d54553907378e27 not found: ID does not exist" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.333845 4685 scope.go:117] "RemoveContainer" containerID="4230dcbcd1eeb7b4adcb421ec10f05eeeb80e43a53b50681dd0f092a2e418583" Jan 28 12:52:12 crc kubenswrapper[4685]: E0128 12:52:12.334220 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4230dcbcd1eeb7b4adcb421ec10f05eeeb80e43a53b50681dd0f092a2e418583\": container with ID starting with 4230dcbcd1eeb7b4adcb421ec10f05eeeb80e43a53b50681dd0f092a2e418583 not found: ID does not exist" containerID="4230dcbcd1eeb7b4adcb421ec10f05eeeb80e43a53b50681dd0f092a2e418583" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.334254 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4230dcbcd1eeb7b4adcb421ec10f05eeeb80e43a53b50681dd0f092a2e418583"} err="failed to get container status \"4230dcbcd1eeb7b4adcb421ec10f05eeeb80e43a53b50681dd0f092a2e418583\": rpc error: code = NotFound desc = could not find container \"4230dcbcd1eeb7b4adcb421ec10f05eeeb80e43a53b50681dd0f092a2e418583\": container with ID starting with 4230dcbcd1eeb7b4adcb421ec10f05eeeb80e43a53b50681dd0f092a2e418583 not found: ID does not exist" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.554310 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49344d3b-6a61-405f-842a-b5f70383b376" path="/var/lib/kubelet/pods/49344d3b-6a61-405f-842a-b5f70383b376/volumes" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.555209 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" path="/var/lib/kubelet/pods/d7a9fc1e-083a-4e7e-889f-252cb21cd8fe/volumes" Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.875208 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.875501 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-0" podUID="e6e537d2-0469-4972-a384-5785f9e21c95" containerName="glance-log" containerID="cri-o://f4763cbcfe2148f39a1b1b5af88637ded71ff38f49f235574f36b900b19ee92a" gracePeriod=30 Jan 28 12:52:12 crc kubenswrapper[4685]: I0128 12:52:12.875619 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-single-0" podUID="e6e537d2-0469-4972-a384-5785f9e21c95" containerName="glance-httpd" containerID="cri-o://20982af0012b4a2581c1092f2ea7915acd6480e8fb0854b3a7a48f0d9e3a8a09" gracePeriod=30 Jan 28 12:52:13 crc kubenswrapper[4685]: I0128 12:52:13.249483 4685 generic.go:334] "Generic (PLEG): container finished" 
podID="e6e537d2-0469-4972-a384-5785f9e21c95" containerID="f4763cbcfe2148f39a1b1b5af88637ded71ff38f49f235574f36b900b19ee92a" exitCode=143 Jan 28 12:52:13 crc kubenswrapper[4685]: I0128 12:52:13.249795 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"e6e537d2-0469-4972-a384-5785f9e21c95","Type":"ContainerDied","Data":"f4763cbcfe2148f39a1b1b5af88637ded71ff38f49f235574f36b900b19ee92a"} Jan 28 12:52:15 crc kubenswrapper[4685]: I0128 12:52:15.545661 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:52:15 crc kubenswrapper[4685]: E0128 12:52:15.546266 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.282921 4685 generic.go:334] "Generic (PLEG): container finished" podID="e6e537d2-0469-4972-a384-5785f9e21c95" containerID="20982af0012b4a2581c1092f2ea7915acd6480e8fb0854b3a7a48f0d9e3a8a09" exitCode=0 Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.283020 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"e6e537d2-0469-4972-a384-5785f9e21c95","Type":"ContainerDied","Data":"20982af0012b4a2581c1092f2ea7915acd6480e8fb0854b3a7a48f0d9e3a8a09"} Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.387357 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.557385 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-lib-modules\") pod \"e6e537d2-0469-4972-a384-5785f9e21c95\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.557416 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"e6e537d2-0469-4972-a384-5785f9e21c95\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.557439 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-run\") pod \"e6e537d2-0469-4972-a384-5785f9e21c95\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.557480 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e6e537d2-0469-4972-a384-5785f9e21c95-httpd-run\") pod \"e6e537d2-0469-4972-a384-5785f9e21c95\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.557469 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "e6e537d2-0469-4972-a384-5785f9e21c95" (UID: "e6e537d2-0469-4972-a384-5785f9e21c95"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.557503 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-sys\") pod \"e6e537d2-0469-4972-a384-5785f9e21c95\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.557543 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-sys" (OuterVolumeSpecName: "sys") pod "e6e537d2-0469-4972-a384-5785f9e21c95" (UID: "e6e537d2-0469-4972-a384-5785f9e21c95"). InnerVolumeSpecName "sys". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.557610 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-dev\") pod \"e6e537d2-0469-4972-a384-5785f9e21c95\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.557675 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-etc-nvme\") pod \"e6e537d2-0469-4972-a384-5785f9e21c95\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.557987 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-var-locks-brick\") pod \"e6e537d2-0469-4972-a384-5785f9e21c95\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.558030 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6e537d2-0469-4972-a384-5785f9e21c95-logs\") pod \"e6e537d2-0469-4972-a384-5785f9e21c95\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.558080 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4q6b9\" (UniqueName: \"kubernetes.io/projected/e6e537d2-0469-4972-a384-5785f9e21c95-kube-api-access-4q6b9\") pod \"e6e537d2-0469-4972-a384-5785f9e21c95\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.558137 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-etc-iscsi\") pod \"e6e537d2-0469-4972-a384-5785f9e21c95\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.558215 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6e537d2-0469-4972-a384-5785f9e21c95-config-data\") pod \"e6e537d2-0469-4972-a384-5785f9e21c95\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.558251 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6e537d2-0469-4972-a384-5785f9e21c95-scripts\") pod \"e6e537d2-0469-4972-a384-5785f9e21c95\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.558299 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"e6e537d2-0469-4972-a384-5785f9e21c95\" (UID: \"e6e537d2-0469-4972-a384-5785f9e21c95\") " Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.558419 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "e6e537d2-0469-4972-a384-5785f9e21c95" (UID: "e6e537d2-0469-4972-a384-5785f9e21c95"). InnerVolumeSpecName "etc-nvme". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.558416 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "e6e537d2-0469-4972-a384-5785f9e21c95" (UID: "e6e537d2-0469-4972-a384-5785f9e21c95"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.558449 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-run" (OuterVolumeSpecName: "run") pod "e6e537d2-0469-4972-a384-5785f9e21c95" (UID: "e6e537d2-0469-4972-a384-5785f9e21c95"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.558479 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "e6e537d2-0469-4972-a384-5785f9e21c95" (UID: "e6e537d2-0469-4972-a384-5785f9e21c95"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.558550 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-dev" (OuterVolumeSpecName: "dev") pod "e6e537d2-0469-4972-a384-5785f9e21c95" (UID: "e6e537d2-0469-4972-a384-5785f9e21c95"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.558931 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6e537d2-0469-4972-a384-5785f9e21c95-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "e6e537d2-0469-4972-a384-5785f9e21c95" (UID: "e6e537d2-0469-4972-a384-5785f9e21c95"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.559165 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6e537d2-0469-4972-a384-5785f9e21c95-logs" (OuterVolumeSpecName: "logs") pod "e6e537d2-0469-4972-a384-5785f9e21c95" (UID: "e6e537d2-0469-4972-a384-5785f9e21c95"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.559523 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.559556 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.559574 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.559590 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e6e537d2-0469-4972-a384-5785f9e21c95-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.559609 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.559624 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.559640 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.559657 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/e6e537d2-0469-4972-a384-5785f9e21c95-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.559674 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6e537d2-0469-4972-a384-5785f9e21c95-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.563503 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "e6e537d2-0469-4972-a384-5785f9e21c95" (UID: "e6e537d2-0469-4972-a384-5785f9e21c95"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.563591 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance-cache") pod "e6e537d2-0469-4972-a384-5785f9e21c95" (UID: "e6e537d2-0469-4972-a384-5785f9e21c95"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.564487 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6e537d2-0469-4972-a384-5785f9e21c95-kube-api-access-4q6b9" (OuterVolumeSpecName: "kube-api-access-4q6b9") pod "e6e537d2-0469-4972-a384-5785f9e21c95" (UID: "e6e537d2-0469-4972-a384-5785f9e21c95"). 
InnerVolumeSpecName "kube-api-access-4q6b9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.566130 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6e537d2-0469-4972-a384-5785f9e21c95-scripts" (OuterVolumeSpecName: "scripts") pod "e6e537d2-0469-4972-a384-5785f9e21c95" (UID: "e6e537d2-0469-4972-a384-5785f9e21c95"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.605711 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6e537d2-0469-4972-a384-5785f9e21c95-config-data" (OuterVolumeSpecName: "config-data") pod "e6e537d2-0469-4972-a384-5785f9e21c95" (UID: "e6e537d2-0469-4972-a384-5785f9e21c95"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.661339 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4q6b9\" (UniqueName: \"kubernetes.io/projected/e6e537d2-0469-4972-a384-5785f9e21c95-kube-api-access-4q6b9\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.661400 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6e537d2-0469-4972-a384-5785f9e21c95-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.661411 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6e537d2-0469-4972-a384-5785f9e21c95-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.661432 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.661444 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.676033 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.676408 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.762550 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:16 crc kubenswrapper[4685]: I0128 12:52:16.762581 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:17 crc kubenswrapper[4685]: I0128 12:52:17.309580 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-single-0" event={"ID":"e6e537d2-0469-4972-a384-5785f9e21c95","Type":"ContainerDied","Data":"9f54a9dde8d16c2248f2887ddd582c88ad0b3496c8cf156bb5798f402bf6d4d1"} Jan 28 12:52:17 crc kubenswrapper[4685]: I0128 
12:52:17.309631 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-single-0" Jan 28 12:52:17 crc kubenswrapper[4685]: I0128 12:52:17.309635 4685 scope.go:117] "RemoveContainer" containerID="20982af0012b4a2581c1092f2ea7915acd6480e8fb0854b3a7a48f0d9e3a8a09" Jan 28 12:52:17 crc kubenswrapper[4685]: I0128 12:52:17.332875 4685 scope.go:117] "RemoveContainer" containerID="f4763cbcfe2148f39a1b1b5af88637ded71ff38f49f235574f36b900b19ee92a" Jan 28 12:52:17 crc kubenswrapper[4685]: I0128 12:52:17.335904 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:52:17 crc kubenswrapper[4685]: I0128 12:52:17.340910 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-single-0"] Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.330925 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-db-sync-b48b4"] Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.337920 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-db-sync-b48b4"] Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.371330 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance6fc2-account-delete-ndhhk"] Jan 28 12:52:18 crc kubenswrapper[4685]: E0128 12:52:18.371622 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" containerName="glance-httpd" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.371639 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" containerName="glance-httpd" Jan 28 12:52:18 crc kubenswrapper[4685]: E0128 12:52:18.371654 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" containerName="glance-log" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.371660 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" containerName="glance-log" Jan 28 12:52:18 crc kubenswrapper[4685]: E0128 12:52:18.371680 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6e537d2-0469-4972-a384-5785f9e21c95" containerName="glance-log" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.371686 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6e537d2-0469-4972-a384-5785f9e21c95" containerName="glance-log" Jan 28 12:52:18 crc kubenswrapper[4685]: E0128 12:52:18.371698 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6e537d2-0469-4972-a384-5785f9e21c95" containerName="glance-httpd" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.371704 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6e537d2-0469-4972-a384-5785f9e21c95" containerName="glance-httpd" Jan 28 12:52:18 crc kubenswrapper[4685]: E0128 12:52:18.371714 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49344d3b-6a61-405f-842a-b5f70383b376" containerName="glance-log" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.371719 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="49344d3b-6a61-405f-842a-b5f70383b376" containerName="glance-log" Jan 28 12:52:18 crc kubenswrapper[4685]: E0128 12:52:18.371730 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49344d3b-6a61-405f-842a-b5f70383b376" containerName="glance-httpd" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.371736 
4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="49344d3b-6a61-405f-842a-b5f70383b376" containerName="glance-httpd" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.371845 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6e537d2-0469-4972-a384-5785f9e21c95" containerName="glance-log" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.371857 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="49344d3b-6a61-405f-842a-b5f70383b376" containerName="glance-log" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.371865 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="49344d3b-6a61-405f-842a-b5f70383b376" containerName="glance-httpd" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.371873 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" containerName="glance-log" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.371879 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7a9fc1e-083a-4e7e-889f-252cb21cd8fe" containerName="glance-httpd" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.371893 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6e537d2-0469-4972-a384-5785f9e21c95" containerName="glance-httpd" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.372358 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance6fc2-account-delete-ndhhk" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.386515 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance6fc2-account-delete-ndhhk"] Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.489019 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb03b5e3-1269-4b33-bb2b-2a88788fe7cb-operator-scripts\") pod \"glance6fc2-account-delete-ndhhk\" (UID: \"cb03b5e3-1269-4b33-bb2b-2a88788fe7cb\") " pod="glance-kuttl-tests/glance6fc2-account-delete-ndhhk" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.489100 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9g75\" (UniqueName: \"kubernetes.io/projected/cb03b5e3-1269-4b33-bb2b-2a88788fe7cb-kube-api-access-n9g75\") pod \"glance6fc2-account-delete-ndhhk\" (UID: \"cb03b5e3-1269-4b33-bb2b-2a88788fe7cb\") " pod="glance-kuttl-tests/glance6fc2-account-delete-ndhhk" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.555022 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c1506b6-7536-4e5c-86ad-a8b2565cb5fb" path="/var/lib/kubelet/pods/8c1506b6-7536-4e5c-86ad-a8b2565cb5fb/volumes" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.556015 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6e537d2-0469-4972-a384-5785f9e21c95" path="/var/lib/kubelet/pods/e6e537d2-0469-4972-a384-5785f9e21c95/volumes" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.590479 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb03b5e3-1269-4b33-bb2b-2a88788fe7cb-operator-scripts\") pod \"glance6fc2-account-delete-ndhhk\" (UID: \"cb03b5e3-1269-4b33-bb2b-2a88788fe7cb\") " pod="glance-kuttl-tests/glance6fc2-account-delete-ndhhk" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.590571 4685 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9g75\" (UniqueName: \"kubernetes.io/projected/cb03b5e3-1269-4b33-bb2b-2a88788fe7cb-kube-api-access-n9g75\") pod \"glance6fc2-account-delete-ndhhk\" (UID: \"cb03b5e3-1269-4b33-bb2b-2a88788fe7cb\") " pod="glance-kuttl-tests/glance6fc2-account-delete-ndhhk" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.591256 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb03b5e3-1269-4b33-bb2b-2a88788fe7cb-operator-scripts\") pod \"glance6fc2-account-delete-ndhhk\" (UID: \"cb03b5e3-1269-4b33-bb2b-2a88788fe7cb\") " pod="glance-kuttl-tests/glance6fc2-account-delete-ndhhk" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.618146 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9g75\" (UniqueName: \"kubernetes.io/projected/cb03b5e3-1269-4b33-bb2b-2a88788fe7cb-kube-api-access-n9g75\") pod \"glance6fc2-account-delete-ndhhk\" (UID: \"cb03b5e3-1269-4b33-bb2b-2a88788fe7cb\") " pod="glance-kuttl-tests/glance6fc2-account-delete-ndhhk" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.690210 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance6fc2-account-delete-ndhhk" Jan 28 12:52:18 crc kubenswrapper[4685]: I0128 12:52:18.967225 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance6fc2-account-delete-ndhhk"] Jan 28 12:52:19 crc kubenswrapper[4685]: I0128 12:52:19.331430 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance6fc2-account-delete-ndhhk" event={"ID":"cb03b5e3-1269-4b33-bb2b-2a88788fe7cb","Type":"ContainerStarted","Data":"8a9f41f7ac4d14cefb86b26759a10f2a36d29a2f7fb52aa5f9f09af757385cf7"} Jan 28 12:52:19 crc kubenswrapper[4685]: I0128 12:52:19.331774 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance6fc2-account-delete-ndhhk" event={"ID":"cb03b5e3-1269-4b33-bb2b-2a88788fe7cb","Type":"ContainerStarted","Data":"60b62f0c32ebd02b280da4483290e937a1a38433eb6c2d8543aa67ec06ed98aa"} Jan 28 12:52:20 crc kubenswrapper[4685]: I0128 12:52:20.342635 4685 generic.go:334] "Generic (PLEG): container finished" podID="cb03b5e3-1269-4b33-bb2b-2a88788fe7cb" containerID="8a9f41f7ac4d14cefb86b26759a10f2a36d29a2f7fb52aa5f9f09af757385cf7" exitCode=0 Jan 28 12:52:20 crc kubenswrapper[4685]: I0128 12:52:20.342928 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance6fc2-account-delete-ndhhk" event={"ID":"cb03b5e3-1269-4b33-bb2b-2a88788fe7cb","Type":"ContainerDied","Data":"8a9f41f7ac4d14cefb86b26759a10f2a36d29a2f7fb52aa5f9f09af757385cf7"} Jan 28 12:52:21 crc kubenswrapper[4685]: I0128 12:52:21.623461 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance6fc2-account-delete-ndhhk" Jan 28 12:52:21 crc kubenswrapper[4685]: I0128 12:52:21.742071 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb03b5e3-1269-4b33-bb2b-2a88788fe7cb-operator-scripts\") pod \"cb03b5e3-1269-4b33-bb2b-2a88788fe7cb\" (UID: \"cb03b5e3-1269-4b33-bb2b-2a88788fe7cb\") " Jan 28 12:52:21 crc kubenswrapper[4685]: I0128 12:52:21.742222 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9g75\" (UniqueName: \"kubernetes.io/projected/cb03b5e3-1269-4b33-bb2b-2a88788fe7cb-kube-api-access-n9g75\") pod \"cb03b5e3-1269-4b33-bb2b-2a88788fe7cb\" (UID: \"cb03b5e3-1269-4b33-bb2b-2a88788fe7cb\") " Jan 28 12:52:21 crc kubenswrapper[4685]: I0128 12:52:21.742582 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb03b5e3-1269-4b33-bb2b-2a88788fe7cb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cb03b5e3-1269-4b33-bb2b-2a88788fe7cb" (UID: "cb03b5e3-1269-4b33-bb2b-2a88788fe7cb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:52:21 crc kubenswrapper[4685]: I0128 12:52:21.747821 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb03b5e3-1269-4b33-bb2b-2a88788fe7cb-kube-api-access-n9g75" (OuterVolumeSpecName: "kube-api-access-n9g75") pod "cb03b5e3-1269-4b33-bb2b-2a88788fe7cb" (UID: "cb03b5e3-1269-4b33-bb2b-2a88788fe7cb"). InnerVolumeSpecName "kube-api-access-n9g75". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:52:21 crc kubenswrapper[4685]: I0128 12:52:21.844593 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9g75\" (UniqueName: \"kubernetes.io/projected/cb03b5e3-1269-4b33-bb2b-2a88788fe7cb-kube-api-access-n9g75\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:21 crc kubenswrapper[4685]: I0128 12:52:21.844667 4685 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb03b5e3-1269-4b33-bb2b-2a88788fe7cb-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:22 crc kubenswrapper[4685]: I0128 12:52:22.357551 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance6fc2-account-delete-ndhhk" event={"ID":"cb03b5e3-1269-4b33-bb2b-2a88788fe7cb","Type":"ContainerDied","Data":"60b62f0c32ebd02b280da4483290e937a1a38433eb6c2d8543aa67ec06ed98aa"} Jan 28 12:52:22 crc kubenswrapper[4685]: I0128 12:52:22.357593 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="60b62f0c32ebd02b280da4483290e937a1a38433eb6c2d8543aa67ec06ed98aa" Jan 28 12:52:22 crc kubenswrapper[4685]: I0128 12:52:22.357632 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance6fc2-account-delete-ndhhk" Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.389935 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-db-create-6qmq8"] Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.396299 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-db-create-6qmq8"] Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.410070 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance6fc2-account-delete-ndhhk"] Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.415856 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-6fc2-account-create-update-4g6qq"] Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.421683 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance6fc2-account-delete-ndhhk"] Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.428152 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-6fc2-account-create-update-4g6qq"] Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.501093 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-db-create-d86jc"] Jan 28 12:52:23 crc kubenswrapper[4685]: E0128 12:52:23.501403 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb03b5e3-1269-4b33-bb2b-2a88788fe7cb" containerName="mariadb-account-delete" Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.501418 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb03b5e3-1269-4b33-bb2b-2a88788fe7cb" containerName="mariadb-account-delete" Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.501577 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb03b5e3-1269-4b33-bb2b-2a88788fe7cb" containerName="mariadb-account-delete" Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.502092 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-d86jc" Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.509651 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-create-d86jc"] Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.567524 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89vsg\" (UniqueName: \"kubernetes.io/projected/78ca937e-35ea-48eb-b659-705a105a7578-kube-api-access-89vsg\") pod \"glance-db-create-d86jc\" (UID: \"78ca937e-35ea-48eb-b659-705a105a7578\") " pod="glance-kuttl-tests/glance-db-create-d86jc" Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.567605 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/78ca937e-35ea-48eb-b659-705a105a7578-operator-scripts\") pod \"glance-db-create-d86jc\" (UID: \"78ca937e-35ea-48eb-b659-705a105a7578\") " pod="glance-kuttl-tests/glance-db-create-d86jc" Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.590946 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-4464-account-create-update-2tch8"] Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.591754 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-4464-account-create-update-2tch8" Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.595293 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-db-secret" Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.598883 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-4464-account-create-update-2tch8"] Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.669938 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89vsg\" (UniqueName: \"kubernetes.io/projected/78ca937e-35ea-48eb-b659-705a105a7578-kube-api-access-89vsg\") pod \"glance-db-create-d86jc\" (UID: \"78ca937e-35ea-48eb-b659-705a105a7578\") " pod="glance-kuttl-tests/glance-db-create-d86jc" Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.670300 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c-operator-scripts\") pod \"glance-4464-account-create-update-2tch8\" (UID: \"ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c\") " pod="glance-kuttl-tests/glance-4464-account-create-update-2tch8" Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.670431 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgc2m\" (UniqueName: \"kubernetes.io/projected/ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c-kube-api-access-lgc2m\") pod \"glance-4464-account-create-update-2tch8\" (UID: \"ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c\") " pod="glance-kuttl-tests/glance-4464-account-create-update-2tch8" Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.670543 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/78ca937e-35ea-48eb-b659-705a105a7578-operator-scripts\") pod \"glance-db-create-d86jc\" (UID: \"78ca937e-35ea-48eb-b659-705a105a7578\") " pod="glance-kuttl-tests/glance-db-create-d86jc" Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.671431 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/78ca937e-35ea-48eb-b659-705a105a7578-operator-scripts\") pod \"glance-db-create-d86jc\" (UID: \"78ca937e-35ea-48eb-b659-705a105a7578\") " pod="glance-kuttl-tests/glance-db-create-d86jc" Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.686203 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89vsg\" (UniqueName: \"kubernetes.io/projected/78ca937e-35ea-48eb-b659-705a105a7578-kube-api-access-89vsg\") pod \"glance-db-create-d86jc\" (UID: \"78ca937e-35ea-48eb-b659-705a105a7578\") " pod="glance-kuttl-tests/glance-db-create-d86jc" Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.772087 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c-operator-scripts\") pod \"glance-4464-account-create-update-2tch8\" (UID: \"ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c\") " pod="glance-kuttl-tests/glance-4464-account-create-update-2tch8" Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.772132 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgc2m\" (UniqueName: 
\"kubernetes.io/projected/ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c-kube-api-access-lgc2m\") pod \"glance-4464-account-create-update-2tch8\" (UID: \"ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c\") " pod="glance-kuttl-tests/glance-4464-account-create-update-2tch8" Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.772839 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c-operator-scripts\") pod \"glance-4464-account-create-update-2tch8\" (UID: \"ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c\") " pod="glance-kuttl-tests/glance-4464-account-create-update-2tch8" Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.799021 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgc2m\" (UniqueName: \"kubernetes.io/projected/ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c-kube-api-access-lgc2m\") pod \"glance-4464-account-create-update-2tch8\" (UID: \"ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c\") " pod="glance-kuttl-tests/glance-4464-account-create-update-2tch8" Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.831013 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-d86jc" Jan 28 12:52:23 crc kubenswrapper[4685]: I0128 12:52:23.906793 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-4464-account-create-update-2tch8" Jan 28 12:52:24 crc kubenswrapper[4685]: I0128 12:52:24.240849 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-create-d86jc"] Jan 28 12:52:24 crc kubenswrapper[4685]: I0128 12:52:24.303459 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-4464-account-create-update-2tch8"] Jan 28 12:52:24 crc kubenswrapper[4685]: W0128 12:52:24.306917 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podad1794ca_ecfc_4d90_bcf7_aa5bcda7f44c.slice/crio-d05f6212affdb448dcda23f1b2825639b81855a8957aa14d009980a6c3f51416 WatchSource:0}: Error finding container d05f6212affdb448dcda23f1b2825639b81855a8957aa14d009980a6c3f51416: Status 404 returned error can't find the container with id d05f6212affdb448dcda23f1b2825639b81855a8957aa14d009980a6c3f51416 Jan 28 12:52:24 crc kubenswrapper[4685]: I0128 12:52:24.376095 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-4464-account-create-update-2tch8" event={"ID":"ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c","Type":"ContainerStarted","Data":"d05f6212affdb448dcda23f1b2825639b81855a8957aa14d009980a6c3f51416"} Jan 28 12:52:24 crc kubenswrapper[4685]: I0128 12:52:24.377565 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-d86jc" event={"ID":"78ca937e-35ea-48eb-b659-705a105a7578","Type":"ContainerStarted","Data":"1cd11f9b83fbd2afa96ec94ca507f0ea3020eee7048cc99bd607bc7fc6bd8019"} Jan 28 12:52:24 crc kubenswrapper[4685]: I0128 12:52:24.554184 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5045b0ff-dc70-437b-9b7d-781bb39cc561" path="/var/lib/kubelet/pods/5045b0ff-dc70-437b-9b7d-781bb39cc561/volumes" Jan 28 12:52:24 crc kubenswrapper[4685]: I0128 12:52:24.555003 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb03b5e3-1269-4b33-bb2b-2a88788fe7cb" path="/var/lib/kubelet/pods/cb03b5e3-1269-4b33-bb2b-2a88788fe7cb/volumes" Jan 28 12:52:24 crc 
kubenswrapper[4685]: I0128 12:52:24.555564 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc9e53de-76a9-4fe0-b514-4769c31942b2" path="/var/lib/kubelet/pods/fc9e53de-76a9-4fe0-b514-4769c31942b2/volumes" Jan 28 12:52:25 crc kubenswrapper[4685]: I0128 12:52:25.390762 4685 generic.go:334] "Generic (PLEG): container finished" podID="78ca937e-35ea-48eb-b659-705a105a7578" containerID="7775e5f2117fc7c2925d3467035960efedd8628cba956f1b78ab9d4952f337b5" exitCode=0 Jan 28 12:52:25 crc kubenswrapper[4685]: I0128 12:52:25.390850 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-d86jc" event={"ID":"78ca937e-35ea-48eb-b659-705a105a7578","Type":"ContainerDied","Data":"7775e5f2117fc7c2925d3467035960efedd8628cba956f1b78ab9d4952f337b5"} Jan 28 12:52:25 crc kubenswrapper[4685]: I0128 12:52:25.393624 4685 generic.go:334] "Generic (PLEG): container finished" podID="ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c" containerID="982196cf9d1430c23f61c9f6a591d2e502a122f68dd3d208f2823fe1bcd16495" exitCode=0 Jan 28 12:52:25 crc kubenswrapper[4685]: I0128 12:52:25.393671 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-4464-account-create-update-2tch8" event={"ID":"ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c","Type":"ContainerDied","Data":"982196cf9d1430c23f61c9f6a591d2e502a122f68dd3d208f2823fe1bcd16495"} Jan 28 12:52:26 crc kubenswrapper[4685]: I0128 12:52:26.743998 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-d86jc" Jan 28 12:52:26 crc kubenswrapper[4685]: I0128 12:52:26.751374 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-4464-account-create-update-2tch8" Jan 28 12:52:26 crc kubenswrapper[4685]: I0128 12:52:26.820775 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c-operator-scripts\") pod \"ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c\" (UID: \"ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c\") " Jan 28 12:52:26 crc kubenswrapper[4685]: I0128 12:52:26.820965 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lgc2m\" (UniqueName: \"kubernetes.io/projected/ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c-kube-api-access-lgc2m\") pod \"ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c\" (UID: \"ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c\") " Jan 28 12:52:26 crc kubenswrapper[4685]: I0128 12:52:26.820999 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89vsg\" (UniqueName: \"kubernetes.io/projected/78ca937e-35ea-48eb-b659-705a105a7578-kube-api-access-89vsg\") pod \"78ca937e-35ea-48eb-b659-705a105a7578\" (UID: \"78ca937e-35ea-48eb-b659-705a105a7578\") " Jan 28 12:52:26 crc kubenswrapper[4685]: I0128 12:52:26.821038 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/78ca937e-35ea-48eb-b659-705a105a7578-operator-scripts\") pod \"78ca937e-35ea-48eb-b659-705a105a7578\" (UID: \"78ca937e-35ea-48eb-b659-705a105a7578\") " Jan 28 12:52:26 crc kubenswrapper[4685]: I0128 12:52:26.821876 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c" (UID: 
"ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:52:26 crc kubenswrapper[4685]: I0128 12:52:26.821948 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78ca937e-35ea-48eb-b659-705a105a7578-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "78ca937e-35ea-48eb-b659-705a105a7578" (UID: "78ca937e-35ea-48eb-b659-705a105a7578"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:52:26 crc kubenswrapper[4685]: I0128 12:52:26.822220 4685 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/78ca937e-35ea-48eb-b659-705a105a7578-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:26 crc kubenswrapper[4685]: I0128 12:52:26.822238 4685 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:26 crc kubenswrapper[4685]: I0128 12:52:26.826827 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c-kube-api-access-lgc2m" (OuterVolumeSpecName: "kube-api-access-lgc2m") pod "ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c" (UID: "ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c"). InnerVolumeSpecName "kube-api-access-lgc2m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:52:26 crc kubenswrapper[4685]: I0128 12:52:26.828373 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78ca937e-35ea-48eb-b659-705a105a7578-kube-api-access-89vsg" (OuterVolumeSpecName: "kube-api-access-89vsg") pod "78ca937e-35ea-48eb-b659-705a105a7578" (UID: "78ca937e-35ea-48eb-b659-705a105a7578"). InnerVolumeSpecName "kube-api-access-89vsg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:52:26 crc kubenswrapper[4685]: I0128 12:52:26.924098 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lgc2m\" (UniqueName: \"kubernetes.io/projected/ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c-kube-api-access-lgc2m\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:26 crc kubenswrapper[4685]: I0128 12:52:26.924147 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89vsg\" (UniqueName: \"kubernetes.io/projected/78ca937e-35ea-48eb-b659-705a105a7578-kube-api-access-89vsg\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:27 crc kubenswrapper[4685]: I0128 12:52:27.408641 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-4464-account-create-update-2tch8" event={"ID":"ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c","Type":"ContainerDied","Data":"d05f6212affdb448dcda23f1b2825639b81855a8957aa14d009980a6c3f51416"} Jan 28 12:52:27 crc kubenswrapper[4685]: I0128 12:52:27.408690 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d05f6212affdb448dcda23f1b2825639b81855a8957aa14d009980a6c3f51416" Jan 28 12:52:27 crc kubenswrapper[4685]: I0128 12:52:27.408752 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-4464-account-create-update-2tch8" Jan 28 12:52:27 crc kubenswrapper[4685]: I0128 12:52:27.416009 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-create-d86jc" event={"ID":"78ca937e-35ea-48eb-b659-705a105a7578","Type":"ContainerDied","Data":"1cd11f9b83fbd2afa96ec94ca507f0ea3020eee7048cc99bd607bc7fc6bd8019"} Jan 28 12:52:27 crc kubenswrapper[4685]: I0128 12:52:27.416046 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1cd11f9b83fbd2afa96ec94ca507f0ea3020eee7048cc99bd607bc7fc6bd8019" Jan 28 12:52:27 crc kubenswrapper[4685]: I0128 12:52:27.416070 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-create-d86jc" Jan 28 12:52:28 crc kubenswrapper[4685]: I0128 12:52:28.745748 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-db-sync-n8m6j"] Jan 28 12:52:28 crc kubenswrapper[4685]: E0128 12:52:28.746452 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78ca937e-35ea-48eb-b659-705a105a7578" containerName="mariadb-database-create" Jan 28 12:52:28 crc kubenswrapper[4685]: I0128 12:52:28.746470 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="78ca937e-35ea-48eb-b659-705a105a7578" containerName="mariadb-database-create" Jan 28 12:52:28 crc kubenswrapper[4685]: E0128 12:52:28.746513 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c" containerName="mariadb-account-create-update" Jan 28 12:52:28 crc kubenswrapper[4685]: I0128 12:52:28.746524 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c" containerName="mariadb-account-create-update" Jan 28 12:52:28 crc kubenswrapper[4685]: I0128 12:52:28.746682 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="78ca937e-35ea-48eb-b659-705a105a7578" containerName="mariadb-database-create" Jan 28 12:52:28 crc kubenswrapper[4685]: I0128 12:52:28.746710 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c" containerName="mariadb-account-create-update" Jan 28 12:52:28 crc kubenswrapper[4685]: I0128 12:52:28.747297 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-n8m6j" Jan 28 12:52:28 crc kubenswrapper[4685]: I0128 12:52:28.749414 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-glance-dockercfg-sjjkb" Jan 28 12:52:28 crc kubenswrapper[4685]: I0128 12:52:28.749462 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-config-data" Jan 28 12:52:28 crc kubenswrapper[4685]: I0128 12:52:28.755071 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-sync-n8m6j"] Jan 28 12:52:28 crc kubenswrapper[4685]: I0128 12:52:28.849795 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c9965be-6aca-45c5-811c-16b0444568da-config-data\") pod \"glance-db-sync-n8m6j\" (UID: \"1c9965be-6aca-45c5-811c-16b0444568da\") " pod="glance-kuttl-tests/glance-db-sync-n8m6j" Jan 28 12:52:28 crc kubenswrapper[4685]: I0128 12:52:28.849928 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1c9965be-6aca-45c5-811c-16b0444568da-db-sync-config-data\") pod \"glance-db-sync-n8m6j\" (UID: \"1c9965be-6aca-45c5-811c-16b0444568da\") " pod="glance-kuttl-tests/glance-db-sync-n8m6j" Jan 28 12:52:28 crc kubenswrapper[4685]: I0128 12:52:28.849995 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpnkw\" (UniqueName: \"kubernetes.io/projected/1c9965be-6aca-45c5-811c-16b0444568da-kube-api-access-lpnkw\") pod \"glance-db-sync-n8m6j\" (UID: \"1c9965be-6aca-45c5-811c-16b0444568da\") " pod="glance-kuttl-tests/glance-db-sync-n8m6j" Jan 28 12:52:28 crc kubenswrapper[4685]: I0128 12:52:28.951118 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1c9965be-6aca-45c5-811c-16b0444568da-db-sync-config-data\") pod \"glance-db-sync-n8m6j\" (UID: \"1c9965be-6aca-45c5-811c-16b0444568da\") " pod="glance-kuttl-tests/glance-db-sync-n8m6j" Jan 28 12:52:28 crc kubenswrapper[4685]: I0128 12:52:28.951294 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpnkw\" (UniqueName: \"kubernetes.io/projected/1c9965be-6aca-45c5-811c-16b0444568da-kube-api-access-lpnkw\") pod \"glance-db-sync-n8m6j\" (UID: \"1c9965be-6aca-45c5-811c-16b0444568da\") " pod="glance-kuttl-tests/glance-db-sync-n8m6j" Jan 28 12:52:28 crc kubenswrapper[4685]: I0128 12:52:28.951361 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c9965be-6aca-45c5-811c-16b0444568da-config-data\") pod \"glance-db-sync-n8m6j\" (UID: \"1c9965be-6aca-45c5-811c-16b0444568da\") " pod="glance-kuttl-tests/glance-db-sync-n8m6j" Jan 28 12:52:28 crc kubenswrapper[4685]: I0128 12:52:28.955518 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c9965be-6aca-45c5-811c-16b0444568da-config-data\") pod \"glance-db-sync-n8m6j\" (UID: \"1c9965be-6aca-45c5-811c-16b0444568da\") " pod="glance-kuttl-tests/glance-db-sync-n8m6j" Jan 28 12:52:28 crc kubenswrapper[4685]: I0128 12:52:28.963740 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/1c9965be-6aca-45c5-811c-16b0444568da-db-sync-config-data\") pod \"glance-db-sync-n8m6j\" (UID: \"1c9965be-6aca-45c5-811c-16b0444568da\") " pod="glance-kuttl-tests/glance-db-sync-n8m6j" Jan 28 12:52:28 crc kubenswrapper[4685]: I0128 12:52:28.972404 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpnkw\" (UniqueName: \"kubernetes.io/projected/1c9965be-6aca-45c5-811c-16b0444568da-kube-api-access-lpnkw\") pod \"glance-db-sync-n8m6j\" (UID: \"1c9965be-6aca-45c5-811c-16b0444568da\") " pod="glance-kuttl-tests/glance-db-sync-n8m6j" Jan 28 12:52:29 crc kubenswrapper[4685]: I0128 12:52:29.072155 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-n8m6j" Jan 28 12:52:29 crc kubenswrapper[4685]: I0128 12:52:29.500342 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-db-sync-n8m6j"] Jan 28 12:52:30 crc kubenswrapper[4685]: I0128 12:52:30.445960 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-n8m6j" event={"ID":"1c9965be-6aca-45c5-811c-16b0444568da","Type":"ContainerStarted","Data":"8fb9fcad1766edb8d5495cc4d91f9a6d2bc44d76fba4636a9b3d7228538ef936"} Jan 28 12:52:30 crc kubenswrapper[4685]: I0128 12:52:30.446368 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-n8m6j" event={"ID":"1c9965be-6aca-45c5-811c-16b0444568da","Type":"ContainerStarted","Data":"05f199734e0885ad3b29e7c490ef39bfdcfe5c3b11f7b1a03a2550e9bc427d9c"} Jan 28 12:52:30 crc kubenswrapper[4685]: I0128 12:52:30.464139 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-db-sync-n8m6j" podStartSLOduration=2.464116865 podStartE2EDuration="2.464116865s" podCreationTimestamp="2026-01-28 12:52:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:52:30.460879583 +0000 UTC m=+1901.548293448" watchObservedRunningTime="2026-01-28 12:52:30.464116865 +0000 UTC m=+1901.551530710" Jan 28 12:52:30 crc kubenswrapper[4685]: I0128 12:52:30.549585 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65" Jan 28 12:52:31 crc kubenswrapper[4685]: I0128 12:52:31.458598 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerStarted","Data":"be50d96e347898fc35ed6dffd9d30ab7cf139d963b1551cf0b7bbde10cd2350c"} Jan 28 12:52:33 crc kubenswrapper[4685]: I0128 12:52:33.479297 4685 generic.go:334] "Generic (PLEG): container finished" podID="1c9965be-6aca-45c5-811c-16b0444568da" containerID="8fb9fcad1766edb8d5495cc4d91f9a6d2bc44d76fba4636a9b3d7228538ef936" exitCode=0 Jan 28 12:52:33 crc kubenswrapper[4685]: I0128 12:52:33.479404 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-n8m6j" event={"ID":"1c9965be-6aca-45c5-811c-16b0444568da","Type":"ContainerDied","Data":"8fb9fcad1766edb8d5495cc4d91f9a6d2bc44d76fba4636a9b3d7228538ef936"} Jan 28 12:52:34 crc kubenswrapper[4685]: I0128 12:52:34.847241 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-n8m6j" Jan 28 12:52:34 crc kubenswrapper[4685]: I0128 12:52:34.940978 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1c9965be-6aca-45c5-811c-16b0444568da-db-sync-config-data\") pod \"1c9965be-6aca-45c5-811c-16b0444568da\" (UID: \"1c9965be-6aca-45c5-811c-16b0444568da\") " Jan 28 12:52:34 crc kubenswrapper[4685]: I0128 12:52:34.941047 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c9965be-6aca-45c5-811c-16b0444568da-config-data\") pod \"1c9965be-6aca-45c5-811c-16b0444568da\" (UID: \"1c9965be-6aca-45c5-811c-16b0444568da\") " Jan 28 12:52:34 crc kubenswrapper[4685]: I0128 12:52:34.941248 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lpnkw\" (UniqueName: \"kubernetes.io/projected/1c9965be-6aca-45c5-811c-16b0444568da-kube-api-access-lpnkw\") pod \"1c9965be-6aca-45c5-811c-16b0444568da\" (UID: \"1c9965be-6aca-45c5-811c-16b0444568da\") " Jan 28 12:52:34 crc kubenswrapper[4685]: I0128 12:52:34.946986 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c9965be-6aca-45c5-811c-16b0444568da-kube-api-access-lpnkw" (OuterVolumeSpecName: "kube-api-access-lpnkw") pod "1c9965be-6aca-45c5-811c-16b0444568da" (UID: "1c9965be-6aca-45c5-811c-16b0444568da"). InnerVolumeSpecName "kube-api-access-lpnkw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:52:34 crc kubenswrapper[4685]: I0128 12:52:34.955534 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c9965be-6aca-45c5-811c-16b0444568da-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "1c9965be-6aca-45c5-811c-16b0444568da" (UID: "1c9965be-6aca-45c5-811c-16b0444568da"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:52:35 crc kubenswrapper[4685]: I0128 12:52:35.012730 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c9965be-6aca-45c5-811c-16b0444568da-config-data" (OuterVolumeSpecName: "config-data") pod "1c9965be-6aca-45c5-811c-16b0444568da" (UID: "1c9965be-6aca-45c5-811c-16b0444568da"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:52:35 crc kubenswrapper[4685]: I0128 12:52:35.043411 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lpnkw\" (UniqueName: \"kubernetes.io/projected/1c9965be-6aca-45c5-811c-16b0444568da-kube-api-access-lpnkw\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:35 crc kubenswrapper[4685]: I0128 12:52:35.043459 4685 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1c9965be-6aca-45c5-811c-16b0444568da-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:35 crc kubenswrapper[4685]: I0128 12:52:35.043471 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c9965be-6aca-45c5-811c-16b0444568da-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:35 crc kubenswrapper[4685]: I0128 12:52:35.512261 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-db-sync-n8m6j" event={"ID":"1c9965be-6aca-45c5-811c-16b0444568da","Type":"ContainerDied","Data":"05f199734e0885ad3b29e7c490ef39bfdcfe5c3b11f7b1a03a2550e9bc427d9c"} Jan 28 12:52:35 crc kubenswrapper[4685]: I0128 12:52:35.512319 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="05f199734e0885ad3b29e7c490ef39bfdcfe5c3b11f7b1a03a2550e9bc427d9c" Jan 28 12:52:35 crc kubenswrapper[4685]: I0128 12:52:35.512339 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-db-sync-n8m6j" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.621863 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-external-api-1"] Jan 28 12:52:36 crc kubenswrapper[4685]: E0128 12:52:36.622438 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c9965be-6aca-45c5-811c-16b0444568da" containerName="glance-db-sync" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.622451 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c9965be-6aca-45c5-811c-16b0444568da" containerName="glance-db-sync" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.622608 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c9965be-6aca-45c5-811c-16b0444568da" containerName="glance-db-sync" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.623291 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.625143 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-default-external-config-data" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.625143 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-scripts" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.626101 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-glance-dockercfg-sjjkb" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.652495 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-1"] Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.775969 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.776034 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a15e6ca-b616-402c-b742-2ba7acbf5763-logs\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.776059 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0a15e6ca-b616-402c-b742-2ba7acbf5763-httpd-run\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.776096 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-etc-nvme\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.776120 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqmcz\" (UniqueName: \"kubernetes.io/projected/0a15e6ca-b616-402c-b742-2ba7acbf5763-kube-api-access-qqmcz\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.776143 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-dev\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.776265 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-lib-modules\") pod \"glance-default-external-api-1\" (UID: 
\"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.776340 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a15e6ca-b616-402c-b742-2ba7acbf5763-config-data\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.776365 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-sys\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.776396 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage13-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.776423 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-run\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.776464 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-var-locks-brick\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.776492 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a15e6ca-b616-402c-b742-2ba7acbf5763-scripts\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.776561 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-etc-iscsi\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.877522 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a15e6ca-b616-402c-b742-2ba7acbf5763-logs\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.877579 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/0a15e6ca-b616-402c-b742-2ba7acbf5763-httpd-run\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.877633 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-etc-nvme\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.877663 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqmcz\" (UniqueName: \"kubernetes.io/projected/0a15e6ca-b616-402c-b742-2ba7acbf5763-kube-api-access-qqmcz\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.877693 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-dev\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.877722 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-lib-modules\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.877754 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a15e6ca-b616-402c-b742-2ba7acbf5763-config-data\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.877777 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-sys\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.877806 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage13-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.877828 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-run\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.877855 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: 
\"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-var-locks-brick\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.877880 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a15e6ca-b616-402c-b742-2ba7acbf5763-scripts\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.877903 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-etc-iscsi\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.877950 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.878106 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a15e6ca-b616-402c-b742-2ba7acbf5763-logs\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.878125 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0a15e6ca-b616-402c-b742-2ba7acbf5763-httpd-run\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.878207 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-sys\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.878225 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-etc-nvme\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.878240 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-dev\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.878256 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-lib-modules\") pod \"glance-default-external-api-1\" (UID: 
\"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.878352 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") device mount path \"/mnt/openstack/pv17\"" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.878539 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-var-locks-brick\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.878629 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage13-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") device mount path \"/mnt/openstack/pv13\"" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.880977 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-etc-iscsi\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.881042 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-run\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.886542 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a15e6ca-b616-402c-b742-2ba7acbf5763-scripts\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.887204 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a15e6ca-b616-402c-b742-2ba7acbf5763-config-data\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.896528 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqmcz\" (UniqueName: \"kubernetes.io/projected/0a15e6ca-b616-402c-b742-2ba7acbf5763-kube-api-access-qqmcz\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.907514 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " 
pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.911285 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage13-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") pod \"glance-default-external-api-1\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.941400 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.943835 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.945094 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:36 crc kubenswrapper[4685]: I0128 12:52:36.960892 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.015094 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.016278 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.018044 4685 reflector.go:368] Caches populated for *v1.Secret from object-"glance-kuttl-tests"/"glance-default-internal-config-data" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.047091 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.071210 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.072548 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.080622 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.080703 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-var-locks-brick\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.080738 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-etc-nvme\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.080764 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-scripts\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.080797 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-sys\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.080822 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.080841 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-logs\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.080861 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-run\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.080909 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-dev\") pod 
\"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.080995 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-config-data\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.081029 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-lib-modules\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.081051 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-etc-iscsi\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.081081 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrdsj\" (UniqueName: \"kubernetes.io/projected/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-kube-api-access-rrdsj\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.081101 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.081118 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.182304 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-run\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.182351 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-logs\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.182454 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " 
pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.182503 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.182554 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.182681 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.182722 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-var-locks-brick\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.182763 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-etc-nvme\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.182803 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.182824 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.182844 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-scripts\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.182892 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-config-data\") pod 
\"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.182910 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-sys\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.182942 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") device mount path \"/mnt/openstack/pv04\"" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.182953 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.182968 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-logs\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.182994 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183001 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-run\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183038 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-run\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183061 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183072 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-sys\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " 
pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183084 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxvr8\" (UniqueName: \"kubernetes.io/projected/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-kube-api-access-zxvr8\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183142 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-etc-nvme\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183160 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-var-locks-brick\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183314 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-dev\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183341 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-sys\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183367 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-dev\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183389 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183414 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-config-data\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183435 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-scripts\") pod \"glance-default-internal-api-0\" (UID: 
\"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183461 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-logs\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183465 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183525 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-lib-modules\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183544 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-etc-iscsi\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183565 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrdsj\" (UniqueName: \"kubernetes.io/projected/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-kube-api-access-rrdsj\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183635 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-dev\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183726 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-etc-iscsi\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183761 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") device mount path \"/mnt/openstack/pv12\"" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.183779 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-lib-modules\") pod \"glance-default-external-api-0\" (UID: 
\"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.190763 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-scripts\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.191416 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-config-data\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.209043 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrdsj\" (UniqueName: \"kubernetes.io/projected/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-kube-api-access-rrdsj\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.209959 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.216049 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285089 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285150 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285212 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285245 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " 
pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285272 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c8320012-ecf6-4659-a559-e4934d2aedc9-httpd-run\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285302 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8320012-ecf6-4659-a559-e4934d2aedc9-config-data\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285322 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-dev\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285346 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285367 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxvr8\" (UniqueName: \"kubernetes.io/projected/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-kube-api-access-zxvr8\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285387 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-etc-nvme\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285415 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-sys\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285434 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-sys\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285451 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-dev\") pod \"glance-default-internal-api-0\" (UID: 
\"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285467 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285486 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285507 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285534 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xmjs\" (UniqueName: \"kubernetes.io/projected/c8320012-ecf6-4659-a559-e4934d2aedc9-kube-api-access-9xmjs\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285559 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-run\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285579 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-logs\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285603 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285622 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285644 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8320012-ecf6-4659-a559-e4934d2aedc9-scripts\") pod \"glance-default-internal-api-1\" (UID: 
\"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285663 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285685 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-run\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285719 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-etc-iscsi\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285734 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8320012-ecf6-4659-a559-e4934d2aedc9-logs\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285751 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-var-locks-brick\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285767 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-lib-modules\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285881 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.285922 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.286415 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-run\") pod 
\"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.286441 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.286504 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.286532 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-sys\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.286672 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") device mount path \"/mnt/openstack/pv08\"" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.286711 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-dev\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.286720 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") device mount path \"/mnt/openstack/pv02\"" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.286925 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-logs\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.287129 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.291018 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-config-data\") pod \"glance-default-internal-api-0\" (UID: 
\"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.292948 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.304752 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxvr8\" (UniqueName: \"kubernetes.io/projected/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-kube-api-access-zxvr8\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.308098 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.308715 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.350371 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.358455 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.387361 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c8320012-ecf6-4659-a559-e4934d2aedc9-httpd-run\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.387637 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8320012-ecf6-4659-a559-e4934d2aedc9-config-data\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.387759 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-dev\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.387871 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-etc-nvme\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.387998 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-sys\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.388135 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xmjs\" (UniqueName: \"kubernetes.io/projected/c8320012-ecf6-4659-a559-e4934d2aedc9-kube-api-access-9xmjs\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.388288 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.388393 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8320012-ecf6-4659-a559-e4934d2aedc9-scripts\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.388490 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-run\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " 
pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.388591 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-etc-iscsi\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.388702 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8320012-ecf6-4659-a559-e4934d2aedc9-logs\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.388850 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-var-locks-brick\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.388954 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-lib-modules\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.389090 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.389425 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") device mount path \"/mnt/openstack/pv14\"" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.397399 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") device mount path \"/mnt/openstack/pv11\"" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.402588 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-etc-iscsi\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.402662 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-var-locks-brick\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " 
pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.403476 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8320012-ecf6-4659-a559-e4934d2aedc9-logs\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.403530 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-dev\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.403767 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c8320012-ecf6-4659-a559-e4934d2aedc9-httpd-run\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.403828 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-lib-modules\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.403866 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-sys\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.403885 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-run\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.403900 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-etc-nvme\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.409820 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8320012-ecf6-4659-a559-e4934d2aedc9-scripts\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.417356 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8320012-ecf6-4659-a559-e4934d2aedc9-config-data\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.419907 4685 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["glance-kuttl-tests/glance-default-external-api-1"] Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.422429 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.424232 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xmjs\" (UniqueName: \"kubernetes.io/projected/c8320012-ecf6-4659-a559-e4934d2aedc9-kube-api-access-9xmjs\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.426627 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-1\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.532432 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-1" event={"ID":"0a15e6ca-b616-402c-b742-2ba7acbf5763","Type":"ContainerStarted","Data":"cf29335d16e1628e96760a4a8f389956a74bd135959a1a2752b3a4ef962a182c"} Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.701464 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.819451 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.878525 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:52:37 crc kubenswrapper[4685]: I0128 12:52:37.891392 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:52:38 crc kubenswrapper[4685]: W0128 12:52:38.170852 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc8320012_ecf6_4659_a559_e4934d2aedc9.slice/crio-bce32fc568b7582f9bed1168996cbfd37e701300b733247d114274a65344da60 WatchSource:0}: Error finding container bce32fc568b7582f9bed1168996cbfd37e701300b733247d114274a65344da60: Status 404 returned error can't find the container with id bce32fc568b7582f9bed1168996cbfd37e701300b733247d114274a65344da60 Jan 28 12:52:38 crc kubenswrapper[4685]: I0128 12:52:38.172667 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:52:38 crc kubenswrapper[4685]: I0128 12:52:38.542314 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"c8320012-ecf6-4659-a559-e4934d2aedc9","Type":"ContainerStarted","Data":"2a1cafa1de8dcfffaae802b13db44502ded8dee5f8a4f3244e4ccc897b21255e"} Jan 28 12:52:38 crc kubenswrapper[4685]: I0128 12:52:38.542756 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" 
event={"ID":"c8320012-ecf6-4659-a559-e4934d2aedc9","Type":"ContainerStarted","Data":"4d9c9f3f7ca669092d8b60b321cf330f50f3aac6568848a441d140ff8ebc11ee"} Jan 28 12:52:38 crc kubenswrapper[4685]: I0128 12:52:38.542776 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"c8320012-ecf6-4659-a559-e4934d2aedc9","Type":"ContainerStarted","Data":"bce32fc568b7582f9bed1168996cbfd37e701300b733247d114274a65344da60"} Jan 28 12:52:38 crc kubenswrapper[4685]: I0128 12:52:38.542363 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-1" podUID="c8320012-ecf6-4659-a559-e4934d2aedc9" containerName="glance-log" containerID="cri-o://4d9c9f3f7ca669092d8b60b321cf330f50f3aac6568848a441d140ff8ebc11ee" gracePeriod=30 Jan 28 12:52:38 crc kubenswrapper[4685]: I0128 12:52:38.542384 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-1" podUID="c8320012-ecf6-4659-a559-e4934d2aedc9" containerName="glance-httpd" containerID="cri-o://2a1cafa1de8dcfffaae802b13db44502ded8dee5f8a4f3244e4ccc897b21255e" gracePeriod=30 Jan 28 12:52:38 crc kubenswrapper[4685]: I0128 12:52:38.565378 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"a36f2403-dd84-4f16-ae78-ee3b95f8ff13","Type":"ContainerStarted","Data":"d789f8e7467b7bc40b62e98c5f54aeca2b696eac503a0f23b1cb9c2bdfb72ea7"} Jan 28 12:52:38 crc kubenswrapper[4685]: I0128 12:52:38.565729 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"a36f2403-dd84-4f16-ae78-ee3b95f8ff13","Type":"ContainerStarted","Data":"c774664d8158f66a299be2ed8b5afd1d51ff7273c1cc4539a23bd86617e5e759"} Jan 28 12:52:38 crc kubenswrapper[4685]: I0128 12:52:38.565740 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"a36f2403-dd84-4f16-ae78-ee3b95f8ff13","Type":"ContainerStarted","Data":"dcb5b0185f366150451568da1fa4085b0eb1b5edbd05a0ec2b7488398fd7fdee"} Jan 28 12:52:38 crc kubenswrapper[4685]: I0128 12:52:38.565749 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-1" event={"ID":"0a15e6ca-b616-402c-b742-2ba7acbf5763","Type":"ContainerStarted","Data":"cac4c7268b6aba2af8e2759cb23fee396f86cd2373636a86eeaf91ffca8fee77"} Jan 28 12:52:38 crc kubenswrapper[4685]: I0128 12:52:38.565759 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-1" event={"ID":"0a15e6ca-b616-402c-b742-2ba7acbf5763","Type":"ContainerStarted","Data":"d0c2248ff4107e651e4fb744e842d77f5d3c38831a2dd7b914aff1a09c429216"} Jan 28 12:52:38 crc kubenswrapper[4685]: I0128 12:52:38.570695 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"5ac7fb56-eece-4b9a-96d6-fd3671e0020e","Type":"ContainerStarted","Data":"7af96e2ebfd6ba4db1b6ccf4d02debc28c4d84a2066b6837c33a35ae3538dc91"} Jan 28 12:52:38 crc kubenswrapper[4685]: I0128 12:52:38.570738 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"5ac7fb56-eece-4b9a-96d6-fd3671e0020e","Type":"ContainerStarted","Data":"eb20730b53b0a274d201d5547143efe22d59c28cf37b8a9384cbc11fb008a5a4"} Jan 28 12:52:38 crc kubenswrapper[4685]: I0128 12:52:38.570748 
4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"5ac7fb56-eece-4b9a-96d6-fd3671e0020e","Type":"ContainerStarted","Data":"3e75c879ece2ca54f48d79df99b5025fa368c2f44f2aa8a5a74416cd1bc0be66"} Jan 28 12:52:38 crc kubenswrapper[4685]: I0128 12:52:38.581501 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-internal-api-1" podStartSLOduration=2.581484206 podStartE2EDuration="2.581484206s" podCreationTimestamp="2026-01-28 12:52:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:52:38.574354724 +0000 UTC m=+1909.661768569" watchObservedRunningTime="2026-01-28 12:52:38.581484206 +0000 UTC m=+1909.668898041" Jan 28 12:52:38 crc kubenswrapper[4685]: I0128 12:52:38.603197 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-external-api-0" podStartSLOduration=3.6031588709999998 podStartE2EDuration="3.603158871s" podCreationTimestamp="2026-01-28 12:52:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:52:38.592709165 +0000 UTC m=+1909.680123000" watchObservedRunningTime="2026-01-28 12:52:38.603158871 +0000 UTC m=+1909.690572706" Jan 28 12:52:38 crc kubenswrapper[4685]: I0128 12:52:38.613755 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-internal-api-0" podStartSLOduration=3.613735292 podStartE2EDuration="3.613735292s" podCreationTimestamp="2026-01-28 12:52:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:52:38.611747685 +0000 UTC m=+1909.699161520" watchObservedRunningTime="2026-01-28 12:52:38.613735292 +0000 UTC m=+1909.701149127" Jan 28 12:52:38 crc kubenswrapper[4685]: I0128 12:52:38.636058 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-external-api-1" podStartSLOduration=2.636042115 podStartE2EDuration="2.636042115s" podCreationTimestamp="2026-01-28 12:52:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:52:38.631372402 +0000 UTC m=+1909.718786237" watchObservedRunningTime="2026-01-28 12:52:38.636042115 +0000 UTC m=+1909.723455950" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.040554 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.115261 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"c8320012-ecf6-4659-a559-e4934d2aedc9\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.115747 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c8320012-ecf6-4659-a559-e4934d2aedc9-httpd-run\") pod \"c8320012-ecf6-4659-a559-e4934d2aedc9\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.115902 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xmjs\" (UniqueName: \"kubernetes.io/projected/c8320012-ecf6-4659-a559-e4934d2aedc9-kube-api-access-9xmjs\") pod \"c8320012-ecf6-4659-a559-e4934d2aedc9\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.116075 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8320012-ecf6-4659-a559-e4934d2aedc9-scripts\") pod \"c8320012-ecf6-4659-a559-e4934d2aedc9\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.116204 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8320012-ecf6-4659-a559-e4934d2aedc9-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "c8320012-ecf6-4659-a559-e4934d2aedc9" (UID: "c8320012-ecf6-4659-a559-e4934d2aedc9"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.116240 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-var-locks-brick\") pod \"c8320012-ecf6-4659-a559-e4934d2aedc9\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.116370 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-etc-iscsi\") pod \"c8320012-ecf6-4659-a559-e4934d2aedc9\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.116457 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-etc-nvme\") pod \"c8320012-ecf6-4659-a559-e4934d2aedc9\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.116499 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-run\") pod \"c8320012-ecf6-4659-a559-e4934d2aedc9\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.116555 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-lib-modules\") pod \"c8320012-ecf6-4659-a559-e4934d2aedc9\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.116588 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "c8320012-ecf6-4659-a559-e4934d2aedc9" (UID: "c8320012-ecf6-4659-a559-e4934d2aedc9"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.116609 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-sys\") pod \"c8320012-ecf6-4659-a559-e4934d2aedc9\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.116649 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-run" (OuterVolumeSpecName: "run") pod "c8320012-ecf6-4659-a559-e4934d2aedc9" (UID: "c8320012-ecf6-4659-a559-e4934d2aedc9"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.116654 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "c8320012-ecf6-4659-a559-e4934d2aedc9" (UID: "c8320012-ecf6-4659-a559-e4934d2aedc9"). InnerVolumeSpecName "etc-nvme". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.116670 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "c8320012-ecf6-4659-a559-e4934d2aedc9" (UID: "c8320012-ecf6-4659-a559-e4934d2aedc9"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.116700 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8320012-ecf6-4659-a559-e4934d2aedc9-config-data\") pod \"c8320012-ecf6-4659-a559-e4934d2aedc9\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.116769 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-sys" (OuterVolumeSpecName: "sys") pod "c8320012-ecf6-4659-a559-e4934d2aedc9" (UID: "c8320012-ecf6-4659-a559-e4934d2aedc9"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.116802 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"c8320012-ecf6-4659-a559-e4934d2aedc9\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.116860 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8320012-ecf6-4659-a559-e4934d2aedc9-logs\") pod \"c8320012-ecf6-4659-a559-e4934d2aedc9\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.116912 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-dev\") pod \"c8320012-ecf6-4659-a559-e4934d2aedc9\" (UID: \"c8320012-ecf6-4659-a559-e4934d2aedc9\") " Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.117133 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "c8320012-ecf6-4659-a559-e4934d2aedc9" (UID: "c8320012-ecf6-4659-a559-e4934d2aedc9"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.117279 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8320012-ecf6-4659-a559-e4934d2aedc9-logs" (OuterVolumeSpecName: "logs") pod "c8320012-ecf6-4659-a559-e4934d2aedc9" (UID: "c8320012-ecf6-4659-a559-e4934d2aedc9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.117369 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-dev" (OuterVolumeSpecName: "dev") pod "c8320012-ecf6-4659-a559-e4934d2aedc9" (UID: "c8320012-ecf6-4659-a559-e4934d2aedc9"). InnerVolumeSpecName "dev". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.117833 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.117931 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.118008 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.118085 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.118160 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.118271 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.118355 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8320012-ecf6-4659-a559-e4934d2aedc9-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.118444 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/c8320012-ecf6-4659-a559-e4934d2aedc9-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.118535 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c8320012-ecf6-4659-a559-e4934d2aedc9-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.124836 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance-cache") pod "c8320012-ecf6-4659-a559-e4934d2aedc9" (UID: "c8320012-ecf6-4659-a559-e4934d2aedc9"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.126035 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8320012-ecf6-4659-a559-e4934d2aedc9-scripts" (OuterVolumeSpecName: "scripts") pod "c8320012-ecf6-4659-a559-e4934d2aedc9" (UID: "c8320012-ecf6-4659-a559-e4934d2aedc9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.126988 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8320012-ecf6-4659-a559-e4934d2aedc9-kube-api-access-9xmjs" (OuterVolumeSpecName: "kube-api-access-9xmjs") pod "c8320012-ecf6-4659-a559-e4934d2aedc9" (UID: "c8320012-ecf6-4659-a559-e4934d2aedc9"). 
InnerVolumeSpecName "kube-api-access-9xmjs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.128245 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage14-crc" (OuterVolumeSpecName: "glance") pod "c8320012-ecf6-4659-a559-e4934d2aedc9" (UID: "c8320012-ecf6-4659-a559-e4934d2aedc9"). InnerVolumeSpecName "local-storage14-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.163318 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8320012-ecf6-4659-a559-e4934d2aedc9-config-data" (OuterVolumeSpecName: "config-data") pod "c8320012-ecf6-4659-a559-e4934d2aedc9" (UID: "c8320012-ecf6-4659-a559-e4934d2aedc9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.220035 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.220478 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xmjs\" (UniqueName: \"kubernetes.io/projected/c8320012-ecf6-4659-a559-e4934d2aedc9-kube-api-access-9xmjs\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.220491 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8320012-ecf6-4659-a559-e4934d2aedc9-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.220501 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8320012-ecf6-4659-a559-e4934d2aedc9-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.220518 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") on node \"crc\" " Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.238855 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage14-crc" (UniqueName: "kubernetes.io/local-volume/local-storage14-crc") on node "crc" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.243055 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.321678 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.321713 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.582158 4685 generic.go:334] "Generic (PLEG): container finished" podID="c8320012-ecf6-4659-a559-e4934d2aedc9" containerID="2a1cafa1de8dcfffaae802b13db44502ded8dee5f8a4f3244e4ccc897b21255e" exitCode=143 Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.582196 4685 generic.go:334] "Generic (PLEG): 
container finished" podID="c8320012-ecf6-4659-a559-e4934d2aedc9" containerID="4d9c9f3f7ca669092d8b60b321cf330f50f3aac6568848a441d140ff8ebc11ee" exitCode=143 Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.582306 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"c8320012-ecf6-4659-a559-e4934d2aedc9","Type":"ContainerDied","Data":"2a1cafa1de8dcfffaae802b13db44502ded8dee5f8a4f3244e4ccc897b21255e"} Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.582356 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"c8320012-ecf6-4659-a559-e4934d2aedc9","Type":"ContainerDied","Data":"4d9c9f3f7ca669092d8b60b321cf330f50f3aac6568848a441d140ff8ebc11ee"} Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.582367 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"c8320012-ecf6-4659-a559-e4934d2aedc9","Type":"ContainerDied","Data":"bce32fc568b7582f9bed1168996cbfd37e701300b733247d114274a65344da60"} Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.582388 4685 scope.go:117] "RemoveContainer" containerID="2a1cafa1de8dcfffaae802b13db44502ded8dee5f8a4f3244e4ccc897b21255e" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.582723 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.605307 4685 scope.go:117] "RemoveContainer" containerID="4d9c9f3f7ca669092d8b60b321cf330f50f3aac6568848a441d140ff8ebc11ee" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.619911 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.623555 4685 scope.go:117] "RemoveContainer" containerID="2a1cafa1de8dcfffaae802b13db44502ded8dee5f8a4f3244e4ccc897b21255e" Jan 28 12:52:39 crc kubenswrapper[4685]: E0128 12:52:39.623989 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a1cafa1de8dcfffaae802b13db44502ded8dee5f8a4f3244e4ccc897b21255e\": container with ID starting with 2a1cafa1de8dcfffaae802b13db44502ded8dee5f8a4f3244e4ccc897b21255e not found: ID does not exist" containerID="2a1cafa1de8dcfffaae802b13db44502ded8dee5f8a4f3244e4ccc897b21255e" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.624051 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a1cafa1de8dcfffaae802b13db44502ded8dee5f8a4f3244e4ccc897b21255e"} err="failed to get container status \"2a1cafa1de8dcfffaae802b13db44502ded8dee5f8a4f3244e4ccc897b21255e\": rpc error: code = NotFound desc = could not find container \"2a1cafa1de8dcfffaae802b13db44502ded8dee5f8a4f3244e4ccc897b21255e\": container with ID starting with 2a1cafa1de8dcfffaae802b13db44502ded8dee5f8a4f3244e4ccc897b21255e not found: ID does not exist" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.624093 4685 scope.go:117] "RemoveContainer" containerID="4d9c9f3f7ca669092d8b60b321cf330f50f3aac6568848a441d140ff8ebc11ee" Jan 28 12:52:39 crc kubenswrapper[4685]: E0128 12:52:39.624422 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d9c9f3f7ca669092d8b60b321cf330f50f3aac6568848a441d140ff8ebc11ee\": container with ID 
starting with 4d9c9f3f7ca669092d8b60b321cf330f50f3aac6568848a441d140ff8ebc11ee not found: ID does not exist" containerID="4d9c9f3f7ca669092d8b60b321cf330f50f3aac6568848a441d140ff8ebc11ee" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.624447 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d9c9f3f7ca669092d8b60b321cf330f50f3aac6568848a441d140ff8ebc11ee"} err="failed to get container status \"4d9c9f3f7ca669092d8b60b321cf330f50f3aac6568848a441d140ff8ebc11ee\": rpc error: code = NotFound desc = could not find container \"4d9c9f3f7ca669092d8b60b321cf330f50f3aac6568848a441d140ff8ebc11ee\": container with ID starting with 4d9c9f3f7ca669092d8b60b321cf330f50f3aac6568848a441d140ff8ebc11ee not found: ID does not exist" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.624461 4685 scope.go:117] "RemoveContainer" containerID="2a1cafa1de8dcfffaae802b13db44502ded8dee5f8a4f3244e4ccc897b21255e" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.624722 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a1cafa1de8dcfffaae802b13db44502ded8dee5f8a4f3244e4ccc897b21255e"} err="failed to get container status \"2a1cafa1de8dcfffaae802b13db44502ded8dee5f8a4f3244e4ccc897b21255e\": rpc error: code = NotFound desc = could not find container \"2a1cafa1de8dcfffaae802b13db44502ded8dee5f8a4f3244e4ccc897b21255e\": container with ID starting with 2a1cafa1de8dcfffaae802b13db44502ded8dee5f8a4f3244e4ccc897b21255e not found: ID does not exist" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.624755 4685 scope.go:117] "RemoveContainer" containerID="4d9c9f3f7ca669092d8b60b321cf330f50f3aac6568848a441d140ff8ebc11ee" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.624974 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d9c9f3f7ca669092d8b60b321cf330f50f3aac6568848a441d140ff8ebc11ee"} err="failed to get container status \"4d9c9f3f7ca669092d8b60b321cf330f50f3aac6568848a441d140ff8ebc11ee\": rpc error: code = NotFound desc = could not find container \"4d9c9f3f7ca669092d8b60b321cf330f50f3aac6568848a441d140ff8ebc11ee\": container with ID starting with 4d9c9f3f7ca669092d8b60b321cf330f50f3aac6568848a441d140ff8ebc11ee not found: ID does not exist" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.626361 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.646384 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:52:39 crc kubenswrapper[4685]: E0128 12:52:39.646813 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8320012-ecf6-4659-a559-e4934d2aedc9" containerName="glance-log" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.646828 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8320012-ecf6-4659-a559-e4934d2aedc9" containerName="glance-log" Jan 28 12:52:39 crc kubenswrapper[4685]: E0128 12:52:39.646842 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8320012-ecf6-4659-a559-e4934d2aedc9" containerName="glance-httpd" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.646848 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8320012-ecf6-4659-a559-e4934d2aedc9" containerName="glance-httpd" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.647069 4685 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="c8320012-ecf6-4659-a559-e4934d2aedc9" containerName="glance-log" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.647089 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8320012-ecf6-4659-a559-e4934d2aedc9" containerName="glance-httpd" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.648583 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.661338 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.728687 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.829860 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-dev\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.829962 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f5460a2-cb8a-44aa-a4e5-44109e751133-config-data\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.830080 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.830109 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-var-locks-brick\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.830146 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-etc-iscsi\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.830163 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-run\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 
12:52:39.830224 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f5460a2-cb8a-44aa-a4e5-44109e751133-logs\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.830247 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gb8xk\" (UniqueName: \"kubernetes.io/projected/3f5460a2-cb8a-44aa-a4e5-44109e751133-kube-api-access-gb8xk\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.830288 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f5460a2-cb8a-44aa-a4e5-44109e751133-scripts\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.830387 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-etc-nvme\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.830492 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.830591 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3f5460a2-cb8a-44aa-a4e5-44109e751133-httpd-run\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.830623 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-sys\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.830676 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-lib-modules\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.830721 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") device mount path \"/mnt/openstack/pv14\"" 
pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.852353 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.932299 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.932630 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-var-locks-brick\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.932660 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-etc-iscsi\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.932682 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-run\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.932706 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f5460a2-cb8a-44aa-a4e5-44109e751133-logs\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.932736 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gb8xk\" (UniqueName: \"kubernetes.io/projected/3f5460a2-cb8a-44aa-a4e5-44109e751133-kube-api-access-gb8xk\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.932760 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f5460a2-cb8a-44aa-a4e5-44109e751133-scripts\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.932799 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-var-locks-brick\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 
12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.932807 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-etc-nvme\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.932850 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-etc-nvme\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.932883 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-etc-iscsi\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.932909 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-run\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.932930 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3f5460a2-cb8a-44aa-a4e5-44109e751133-httpd-run\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.932981 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-sys\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.933014 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-lib-modules\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.933071 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-dev\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.933101 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f5460a2-cb8a-44aa-a4e5-44109e751133-config-data\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.933215 4685 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-sys\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.932529 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") device mount path \"/mnt/openstack/pv11\"" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.933333 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f5460a2-cb8a-44aa-a4e5-44109e751133-logs\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.934332 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-lib-modules\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.934396 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3f5460a2-cb8a-44aa-a4e5-44109e751133-httpd-run\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.934421 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-dev\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.938606 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f5460a2-cb8a-44aa-a4e5-44109e751133-scripts\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.939309 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f5460a2-cb8a-44aa-a4e5-44109e751133-config-data\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.952230 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gb8xk\" (UniqueName: \"kubernetes.io/projected/3f5460a2-cb8a-44aa-a4e5-44109e751133-kube-api-access-gb8xk\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:39 crc kubenswrapper[4685]: I0128 12:52:39.953648 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-1\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:40 crc kubenswrapper[4685]: I0128 12:52:40.263368 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:40 crc kubenswrapper[4685]: I0128 12:52:40.554304 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8320012-ecf6-4659-a559-e4934d2aedc9" path="/var/lib/kubelet/pods/c8320012-ecf6-4659-a559-e4934d2aedc9/volumes" Jan 28 12:52:40 crc kubenswrapper[4685]: I0128 12:52:40.688990 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:52:40 crc kubenswrapper[4685]: W0128 12:52:40.691164 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3f5460a2_cb8a_44aa_a4e5_44109e751133.slice/crio-34a1c1ffaf93da6206173eef156fd626eb5d3710d0c3dea62f78483ff9069650 WatchSource:0}: Error finding container 34a1c1ffaf93da6206173eef156fd626eb5d3710d0c3dea62f78483ff9069650: Status 404 returned error can't find the container with id 34a1c1ffaf93da6206173eef156fd626eb5d3710d0c3dea62f78483ff9069650 Jan 28 12:52:41 crc kubenswrapper[4685]: I0128 12:52:41.602932 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"3f5460a2-cb8a-44aa-a4e5-44109e751133","Type":"ContainerStarted","Data":"aa329771fc2f16e17d131a8613d5f5490459ff98b4f92a610b7f2913f4ccb16c"} Jan 28 12:52:41 crc kubenswrapper[4685]: I0128 12:52:41.603545 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"3f5460a2-cb8a-44aa-a4e5-44109e751133","Type":"ContainerStarted","Data":"135ea6a9de8a9e50de966e3781bd0d71e7c08349701ba177a29429eb98f94a14"} Jan 28 12:52:41 crc kubenswrapper[4685]: I0128 12:52:41.603562 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"3f5460a2-cb8a-44aa-a4e5-44109e751133","Type":"ContainerStarted","Data":"34a1c1ffaf93da6206173eef156fd626eb5d3710d0c3dea62f78483ff9069650"} Jan 28 12:52:41 crc kubenswrapper[4685]: I0128 12:52:41.628661 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-internal-api-1" podStartSLOduration=2.62863803 podStartE2EDuration="2.62863803s" podCreationTimestamp="2026-01-28 12:52:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:52:41.623928476 +0000 UTC m=+1912.711342311" watchObservedRunningTime="2026-01-28 12:52:41.62863803 +0000 UTC m=+1912.716051865" Jan 28 12:52:46 crc kubenswrapper[4685]: I0128 12:52:46.942196 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:46 crc kubenswrapper[4685]: I0128 12:52:46.942836 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:46 crc kubenswrapper[4685]: I0128 12:52:46.997934 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:47 crc kubenswrapper[4685]: I0128 12:52:47.117528 4685 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:47 crc kubenswrapper[4685]: I0128 12:52:47.350778 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:47 crc kubenswrapper[4685]: I0128 12:52:47.350839 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:47 crc kubenswrapper[4685]: I0128 12:52:47.360900 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:47 crc kubenswrapper[4685]: I0128 12:52:47.360950 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:47 crc kubenswrapper[4685]: I0128 12:52:47.383102 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:47 crc kubenswrapper[4685]: I0128 12:52:47.385983 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:47 crc kubenswrapper[4685]: I0128 12:52:47.388867 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:47 crc kubenswrapper[4685]: I0128 12:52:47.406304 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:47 crc kubenswrapper[4685]: I0128 12:52:47.662527 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:47 crc kubenswrapper[4685]: I0128 12:52:47.662805 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:47 crc kubenswrapper[4685]: I0128 12:52:47.662818 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:47 crc kubenswrapper[4685]: I0128 12:52:47.662828 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:47 crc kubenswrapper[4685]: I0128 12:52:47.662840 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:47 crc kubenswrapper[4685]: I0128 12:52:47.662849 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:49 crc kubenswrapper[4685]: I0128 12:52:49.580771 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:49 crc kubenswrapper[4685]: I0128 12:52:49.667664 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:52:49 crc kubenswrapper[4685]: I0128 12:52:49.670548 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:49 crc kubenswrapper[4685]: I0128 12:52:49.676954 4685 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:52:49 crc kubenswrapper[4685]: I0128 
12:52:49.676977 4685 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:52:49 crc kubenswrapper[4685]: I0128 12:52:49.676990 4685 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:52:49 crc kubenswrapper[4685]: I0128 12:52:49.714269 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:52:49 crc kubenswrapper[4685]: I0128 12:52:49.770377 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:49 crc kubenswrapper[4685]: I0128 12:52:49.771393 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:49 crc kubenswrapper[4685]: I0128 12:52:49.783459 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:50 crc kubenswrapper[4685]: I0128 12:52:50.264032 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:50 crc kubenswrapper[4685]: I0128 12:52:50.264396 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:50 crc kubenswrapper[4685]: I0128 12:52:50.303656 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:50 crc kubenswrapper[4685]: I0128 12:52:50.307613 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:50 crc kubenswrapper[4685]: I0128 12:52:50.683987 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:50 crc kubenswrapper[4685]: I0128 12:52:50.684026 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:51 crc kubenswrapper[4685]: I0128 12:52:51.692205 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-0" podUID="5ac7fb56-eece-4b9a-96d6-fd3671e0020e" containerName="glance-log" containerID="cri-o://eb20730b53b0a274d201d5547143efe22d59c28cf37b8a9384cbc11fb008a5a4" gracePeriod=30 Jan 28 12:52:51 crc kubenswrapper[4685]: I0128 12:52:51.692341 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-0" podUID="5ac7fb56-eece-4b9a-96d6-fd3671e0020e" containerName="glance-httpd" containerID="cri-o://7af96e2ebfd6ba4db1b6ccf4d02debc28c4d84a2066b6837c33a35ae3538dc91" gracePeriod=30 Jan 28 12:52:51 crc kubenswrapper[4685]: I0128 12:52:51.700470 4685 prober.go:107] "Probe failed" probeType="Readiness" pod="glance-kuttl-tests/glance-default-external-api-0" podUID="5ac7fb56-eece-4b9a-96d6-fd3671e0020e" containerName="glance-httpd" probeResult="failure" output="Get \"http://10.217.0.143:9292/healthcheck\": EOF" Jan 28 12:52:52 crc kubenswrapper[4685]: I0128 12:52:52.712180 4685 generic.go:334] "Generic (PLEG): container finished" podID="5ac7fb56-eece-4b9a-96d6-fd3671e0020e" containerID="eb20730b53b0a274d201d5547143efe22d59c28cf37b8a9384cbc11fb008a5a4" exitCode=143 Jan 28 12:52:52 crc kubenswrapper[4685]: I0128 12:52:52.712239 4685 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"5ac7fb56-eece-4b9a-96d6-fd3671e0020e","Type":"ContainerDied","Data":"eb20730b53b0a274d201d5547143efe22d59c28cf37b8a9384cbc11fb008a5a4"} Jan 28 12:52:52 crc kubenswrapper[4685]: I0128 12:52:52.870714 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:52 crc kubenswrapper[4685]: I0128 12:52:52.870841 4685 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:52:52 crc kubenswrapper[4685]: I0128 12:52:52.878373 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:52:52 crc kubenswrapper[4685]: I0128 12:52:52.942645 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:52:52 crc kubenswrapper[4685]: I0128 12:52:52.942889 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-0" podUID="a36f2403-dd84-4f16-ae78-ee3b95f8ff13" containerName="glance-log" containerID="cri-o://c774664d8158f66a299be2ed8b5afd1d51ff7273c1cc4539a23bd86617e5e759" gracePeriod=30 Jan 28 12:52:52 crc kubenswrapper[4685]: I0128 12:52:52.943058 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-0" podUID="a36f2403-dd84-4f16-ae78-ee3b95f8ff13" containerName="glance-httpd" containerID="cri-o://d789f8e7467b7bc40b62e98c5f54aeca2b696eac503a0f23b1cb9c2bdfb72ea7" gracePeriod=30 Jan 28 12:52:53 crc kubenswrapper[4685]: I0128 12:52:53.727845 4685 generic.go:334] "Generic (PLEG): container finished" podID="a36f2403-dd84-4f16-ae78-ee3b95f8ff13" containerID="c774664d8158f66a299be2ed8b5afd1d51ff7273c1cc4539a23bd86617e5e759" exitCode=143 Jan 28 12:52:53 crc kubenswrapper[4685]: I0128 12:52:53.727901 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"a36f2403-dd84-4f16-ae78-ee3b95f8ff13","Type":"ContainerDied","Data":"c774664d8158f66a299be2ed8b5afd1d51ff7273c1cc4539a23bd86617e5e759"} Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.204767 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.288420 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-config-data\") pod \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.288478 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-etc-iscsi\") pod \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.288526 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-logs\") pod \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.288581 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-run\") pod \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.288634 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-etc-nvme\") pod \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.288640 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-run" (OuterVolumeSpecName: "run") pod "5ac7fb56-eece-4b9a-96d6-fd3671e0020e" (UID: "5ac7fb56-eece-4b9a-96d6-fd3671e0020e"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.288659 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-httpd-run\") pod \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.288745 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-var-locks-brick\") pod \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.288747 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "5ac7fb56-eece-4b9a-96d6-fd3671e0020e" (UID: "5ac7fb56-eece-4b9a-96d6-fd3671e0020e"). InnerVolumeSpecName "etc-nvme". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.288768 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "5ac7fb56-eece-4b9a-96d6-fd3671e0020e" (UID: "5ac7fb56-eece-4b9a-96d6-fd3671e0020e"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.288786 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.288889 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.288929 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-sys\") pod \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.288955 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rrdsj\" (UniqueName: \"kubernetes.io/projected/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-kube-api-access-rrdsj\") pod \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.288994 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-lib-modules\") pod \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.288990 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "5ac7fb56-eece-4b9a-96d6-fd3671e0020e" (UID: "5ac7fb56-eece-4b9a-96d6-fd3671e0020e"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.289029 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-scripts\") pod \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.289034 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-sys" (OuterVolumeSpecName: "sys") pod "5ac7fb56-eece-4b9a-96d6-fd3671e0020e" (UID: "5ac7fb56-eece-4b9a-96d6-fd3671e0020e"). InnerVolumeSpecName "sys". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.289054 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-dev\") pod \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\" (UID: \"5ac7fb56-eece-4b9a-96d6-fd3671e0020e\") " Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.289057 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "5ac7fb56-eece-4b9a-96d6-fd3671e0020e" (UID: "5ac7fb56-eece-4b9a-96d6-fd3671e0020e"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.289607 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.289621 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.289630 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.289639 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.289646 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.289654 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.289698 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-dev" (OuterVolumeSpecName: "dev") pod "5ac7fb56-eece-4b9a-96d6-fd3671e0020e" (UID: "5ac7fb56-eece-4b9a-96d6-fd3671e0020e"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.289820 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "5ac7fb56-eece-4b9a-96d6-fd3671e0020e" (UID: "5ac7fb56-eece-4b9a-96d6-fd3671e0020e"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.290137 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-logs" (OuterVolumeSpecName: "logs") pod "5ac7fb56-eece-4b9a-96d6-fd3671e0020e" (UID: "5ac7fb56-eece-4b9a-96d6-fd3671e0020e"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.294518 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "5ac7fb56-eece-4b9a-96d6-fd3671e0020e" (UID: "5ac7fb56-eece-4b9a-96d6-fd3671e0020e"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.294610 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance-cache") pod "5ac7fb56-eece-4b9a-96d6-fd3671e0020e" (UID: "5ac7fb56-eece-4b9a-96d6-fd3671e0020e"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.295695 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-kube-api-access-rrdsj" (OuterVolumeSpecName: "kube-api-access-rrdsj") pod "5ac7fb56-eece-4b9a-96d6-fd3671e0020e" (UID: "5ac7fb56-eece-4b9a-96d6-fd3671e0020e"). InnerVolumeSpecName "kube-api-access-rrdsj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.316835 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-scripts" (OuterVolumeSpecName: "scripts") pod "5ac7fb56-eece-4b9a-96d6-fd3671e0020e" (UID: "5ac7fb56-eece-4b9a-96d6-fd3671e0020e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.338510 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-config-data" (OuterVolumeSpecName: "config-data") pod "5ac7fb56-eece-4b9a-96d6-fd3671e0020e" (UID: "5ac7fb56-eece-4b9a-96d6-fd3671e0020e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.391184 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.391219 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.391231 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.391260 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.391276 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.391289 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rrdsj\" (UniqueName: \"kubernetes.io/projected/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-kube-api-access-rrdsj\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.391303 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.391313 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/5ac7fb56-eece-4b9a-96d6-fd3671e0020e-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.407408 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.408660 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.492869 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.492898 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.743622 4685 generic.go:334] "Generic (PLEG): container finished" podID="5ac7fb56-eece-4b9a-96d6-fd3671e0020e" containerID="7af96e2ebfd6ba4db1b6ccf4d02debc28c4d84a2066b6837c33a35ae3538dc91" exitCode=0 Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.743689 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" 
event={"ID":"5ac7fb56-eece-4b9a-96d6-fd3671e0020e","Type":"ContainerDied","Data":"7af96e2ebfd6ba4db1b6ccf4d02debc28c4d84a2066b6837c33a35ae3538dc91"} Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.744009 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"5ac7fb56-eece-4b9a-96d6-fd3671e0020e","Type":"ContainerDied","Data":"3e75c879ece2ca54f48d79df99b5025fa368c2f44f2aa8a5a74416cd1bc0be66"} Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.743705 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.750222 4685 scope.go:117] "RemoveContainer" containerID="7af96e2ebfd6ba4db1b6ccf4d02debc28c4d84a2066b6837c33a35ae3538dc91" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.772133 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.781047 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.780898 4685 scope.go:117] "RemoveContainer" containerID="eb20730b53b0a274d201d5547143efe22d59c28cf37b8a9384cbc11fb008a5a4" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.806852 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:52:55 crc kubenswrapper[4685]: E0128 12:52:55.807152 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ac7fb56-eece-4b9a-96d6-fd3671e0020e" containerName="glance-log" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.810820 4685 scope.go:117] "RemoveContainer" containerID="7af96e2ebfd6ba4db1b6ccf4d02debc28c4d84a2066b6837c33a35ae3538dc91" Jan 28 12:52:55 crc kubenswrapper[4685]: E0128 12:52:55.811280 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7af96e2ebfd6ba4db1b6ccf4d02debc28c4d84a2066b6837c33a35ae3538dc91\": container with ID starting with 7af96e2ebfd6ba4db1b6ccf4d02debc28c4d84a2066b6837c33a35ae3538dc91 not found: ID does not exist" containerID="7af96e2ebfd6ba4db1b6ccf4d02debc28c4d84a2066b6837c33a35ae3538dc91" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.811327 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7af96e2ebfd6ba4db1b6ccf4d02debc28c4d84a2066b6837c33a35ae3538dc91"} err="failed to get container status \"7af96e2ebfd6ba4db1b6ccf4d02debc28c4d84a2066b6837c33a35ae3538dc91\": rpc error: code = NotFound desc = could not find container \"7af96e2ebfd6ba4db1b6ccf4d02debc28c4d84a2066b6837c33a35ae3538dc91\": container with ID starting with 7af96e2ebfd6ba4db1b6ccf4d02debc28c4d84a2066b6837c33a35ae3538dc91 not found: ID does not exist" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.811365 4685 scope.go:117] "RemoveContainer" containerID="eb20730b53b0a274d201d5547143efe22d59c28cf37b8a9384cbc11fb008a5a4" Jan 28 12:52:55 crc kubenswrapper[4685]: E0128 12:52:55.811790 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb20730b53b0a274d201d5547143efe22d59c28cf37b8a9384cbc11fb008a5a4\": container with ID starting with eb20730b53b0a274d201d5547143efe22d59c28cf37b8a9384cbc11fb008a5a4 not found: ID does not exist" 
containerID="eb20730b53b0a274d201d5547143efe22d59c28cf37b8a9384cbc11fb008a5a4" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.811812 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb20730b53b0a274d201d5547143efe22d59c28cf37b8a9384cbc11fb008a5a4"} err="failed to get container status \"eb20730b53b0a274d201d5547143efe22d59c28cf37b8a9384cbc11fb008a5a4\": rpc error: code = NotFound desc = could not find container \"eb20730b53b0a274d201d5547143efe22d59c28cf37b8a9384cbc11fb008a5a4\": container with ID starting with eb20730b53b0a274d201d5547143efe22d59c28cf37b8a9384cbc11fb008a5a4 not found: ID does not exist" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.807163 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ac7fb56-eece-4b9a-96d6-fd3671e0020e" containerName="glance-log" Jan 28 12:52:55 crc kubenswrapper[4685]: E0128 12:52:55.812891 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ac7fb56-eece-4b9a-96d6-fd3671e0020e" containerName="glance-httpd" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.812903 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ac7fb56-eece-4b9a-96d6-fd3671e0020e" containerName="glance-httpd" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.813114 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ac7fb56-eece-4b9a-96d6-fd3671e0020e" containerName="glance-log" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.813125 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ac7fb56-eece-4b9a-96d6-fd3671e0020e" containerName="glance-httpd" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.813784 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.813917 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.899710 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-run\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.899784 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.899810 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f76048d5-14a9-4f65-92be-5fd02e186453-scripts\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.899844 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-etc-nvme\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.899875 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-var-locks-brick\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.899912 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.899943 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f76048d5-14a9-4f65-92be-5fd02e186453-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.899977 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f76048d5-14a9-4f65-92be-5fd02e186453-logs\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.900003 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: 
\"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-etc-iscsi\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.900026 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-sys\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.900049 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtr82\" (UniqueName: \"kubernetes.io/projected/f76048d5-14a9-4f65-92be-5fd02e186453-kube-api-access-jtr82\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.900103 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-lib-modules\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.900143 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f76048d5-14a9-4f65-92be-5fd02e186453-config-data\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:55 crc kubenswrapper[4685]: I0128 12:52:55.900234 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-dev\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.001582 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-dev\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.001724 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-dev\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.002106 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-run\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.002291 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.002584 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f76048d5-14a9-4f65-92be-5fd02e186453-scripts\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.003269 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-etc-nvme\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.003373 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-var-locks-brick\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.003444 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-var-locks-brick\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.003399 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-etc-nvme\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.002244 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-run\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.002541 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") device mount path \"/mnt/openstack/pv04\"" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.003662 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.003758 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") device mount path \"/mnt/openstack/pv12\"" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.003764 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f76048d5-14a9-4f65-92be-5fd02e186453-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.003892 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f76048d5-14a9-4f65-92be-5fd02e186453-logs\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.003923 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-etc-iscsi\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.003945 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-sys\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.003976 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtr82\" (UniqueName: \"kubernetes.io/projected/f76048d5-14a9-4f65-92be-5fd02e186453-kube-api-access-jtr82\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.004051 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-sys\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.004062 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-etc-iscsi\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.004416 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f76048d5-14a9-4f65-92be-5fd02e186453-logs\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.004524 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f76048d5-14a9-4f65-92be-5fd02e186453-httpd-run\") pod 
\"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.004602 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-lib-modules\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.004653 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f76048d5-14a9-4f65-92be-5fd02e186453-config-data\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.004715 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-lib-modules\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.007891 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f76048d5-14a9-4f65-92be-5fd02e186453-scripts\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.008691 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f76048d5-14a9-4f65-92be-5fd02e186453-config-data\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.024719 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtr82\" (UniqueName: \"kubernetes.io/projected/f76048d5-14a9-4f65-92be-5fd02e186453-kube-api-access-jtr82\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.035920 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.041712 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.133507 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.558896 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ac7fb56-eece-4b9a-96d6-fd3671e0020e" path="/var/lib/kubelet/pods/5ac7fb56-eece-4b9a-96d6-fd3671e0020e/volumes" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.561884 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.612719 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.613325 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-etc-nvme\") pod \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.613392 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-sys\") pod \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.613445 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "a36f2403-dd84-4f16-ae78-ee3b95f8ff13" (UID: "a36f2403-dd84-4f16-ae78-ee3b95f8ff13"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.613505 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-sys" (OuterVolumeSpecName: "sys") pod "a36f2403-dd84-4f16-ae78-ee3b95f8ff13" (UID: "a36f2403-dd84-4f16-ae78-ee3b95f8ff13"). InnerVolumeSpecName "sys". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.613547 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zxvr8\" (UniqueName: \"kubernetes.io/projected/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-kube-api-access-zxvr8\") pod \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.613585 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-dev\") pod \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.613636 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-etc-iscsi\") pod \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.613655 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-dev" (OuterVolumeSpecName: "dev") pod "a36f2403-dd84-4f16-ae78-ee3b95f8ff13" (UID: "a36f2403-dd84-4f16-ae78-ee3b95f8ff13"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.613674 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-config-data\") pod \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.613682 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "a36f2403-dd84-4f16-ae78-ee3b95f8ff13" (UID: "a36f2403-dd84-4f16-ae78-ee3b95f8ff13"). InnerVolumeSpecName "etc-iscsi". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.613723 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-var-locks-brick\") pod \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.613773 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-logs\") pod \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.613831 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-httpd-run\") pod \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.613888 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-scripts\") pod \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.613916 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.613952 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-run\") pod \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.613990 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.614026 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-lib-modules\") pod \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\" (UID: \"a36f2403-dd84-4f16-ae78-ee3b95f8ff13\") " Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.614355 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-run" (OuterVolumeSpecName: "run") pod "a36f2403-dd84-4f16-ae78-ee3b95f8ff13" (UID: "a36f2403-dd84-4f16-ae78-ee3b95f8ff13"). InnerVolumeSpecName "run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.614731 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.614751 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.614763 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.614773 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.614764 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "a36f2403-dd84-4f16-ae78-ee3b95f8ff13" (UID: "a36f2403-dd84-4f16-ae78-ee3b95f8ff13"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.614781 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.614939 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-logs" (OuterVolumeSpecName: "logs") pod "a36f2403-dd84-4f16-ae78-ee3b95f8ff13" (UID: "a36f2403-dd84-4f16-ae78-ee3b95f8ff13"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.614979 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "a36f2403-dd84-4f16-ae78-ee3b95f8ff13" (UID: "a36f2403-dd84-4f16-ae78-ee3b95f8ff13"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.615014 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a36f2403-dd84-4f16-ae78-ee3b95f8ff13" (UID: "a36f2403-dd84-4f16-ae78-ee3b95f8ff13"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.617900 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "a36f2403-dd84-4f16-ae78-ee3b95f8ff13" (UID: "a36f2403-dd84-4f16-ae78-ee3b95f8ff13"). InnerVolumeSpecName "local-storage08-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.618866 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-kube-api-access-zxvr8" (OuterVolumeSpecName: "kube-api-access-zxvr8") pod "a36f2403-dd84-4f16-ae78-ee3b95f8ff13" (UID: "a36f2403-dd84-4f16-ae78-ee3b95f8ff13"). InnerVolumeSpecName "kube-api-access-zxvr8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.618987 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance-cache") pod "a36f2403-dd84-4f16-ae78-ee3b95f8ff13" (UID: "a36f2403-dd84-4f16-ae78-ee3b95f8ff13"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.620551 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-scripts" (OuterVolumeSpecName: "scripts") pod "a36f2403-dd84-4f16-ae78-ee3b95f8ff13" (UID: "a36f2403-dd84-4f16-ae78-ee3b95f8ff13"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:52:56 crc kubenswrapper[4685]: W0128 12:52:56.623857 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf76048d5_14a9_4f65_92be_5fd02e186453.slice/crio-c3cc5582d879fad77db0275a9bb6b466ec48ed0bd80ac874529fd92317eebf1b WatchSource:0}: Error finding container c3cc5582d879fad77db0275a9bb6b466ec48ed0bd80ac874529fd92317eebf1b: Status 404 returned error can't find the container with id c3cc5582d879fad77db0275a9bb6b466ec48ed0bd80ac874529fd92317eebf1b Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.653847 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-config-data" (OuterVolumeSpecName: "config-data") pod "a36f2403-dd84-4f16-ae78-ee3b95f8ff13" (UID: "a36f2403-dd84-4f16-ae78-ee3b95f8ff13"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.716057 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zxvr8\" (UniqueName: \"kubernetes.io/projected/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-kube-api-access-zxvr8\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.716088 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.716098 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.716109 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.716117 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.716125 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.716154 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.716190 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.716202 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/a36f2403-dd84-4f16-ae78-ee3b95f8ff13-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.733064 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.734116 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.752105 4685 generic.go:334] "Generic (PLEG): container finished" podID="a36f2403-dd84-4f16-ae78-ee3b95f8ff13" containerID="d789f8e7467b7bc40b62e98c5f54aeca2b696eac503a0f23b1cb9c2bdfb72ea7" exitCode=0 Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.752255 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"a36f2403-dd84-4f16-ae78-ee3b95f8ff13","Type":"ContainerDied","Data":"d789f8e7467b7bc40b62e98c5f54aeca2b696eac503a0f23b1cb9c2bdfb72ea7"} Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.752285 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"a36f2403-dd84-4f16-ae78-ee3b95f8ff13","Type":"ContainerDied","Data":"dcb5b0185f366150451568da1fa4085b0eb1b5edbd05a0ec2b7488398fd7fdee"} Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.752303 4685 scope.go:117] "RemoveContainer" containerID="d789f8e7467b7bc40b62e98c5f54aeca2b696eac503a0f23b1cb9c2bdfb72ea7" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.752417 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.757729 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"f76048d5-14a9-4f65-92be-5fd02e186453","Type":"ContainerStarted","Data":"965be0f4a4e8717cd5211783f16fae136683e09c3ebcf4562407b107747a7780"} Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.757769 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"f76048d5-14a9-4f65-92be-5fd02e186453","Type":"ContainerStarted","Data":"c3cc5582d879fad77db0275a9bb6b466ec48ed0bd80ac874529fd92317eebf1b"} Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.781234 4685 scope.go:117] "RemoveContainer" containerID="c774664d8158f66a299be2ed8b5afd1d51ff7273c1cc4539a23bd86617e5e759" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.791409 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.805775 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.817740 4685 scope.go:117] "RemoveContainer" containerID="d789f8e7467b7bc40b62e98c5f54aeca2b696eac503a0f23b1cb9c2bdfb72ea7" Jan 28 12:52:56 crc kubenswrapper[4685]: E0128 12:52:56.818362 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d789f8e7467b7bc40b62e98c5f54aeca2b696eac503a0f23b1cb9c2bdfb72ea7\": container with ID starting with d789f8e7467b7bc40b62e98c5f54aeca2b696eac503a0f23b1cb9c2bdfb72ea7 not found: ID does not exist" containerID="d789f8e7467b7bc40b62e98c5f54aeca2b696eac503a0f23b1cb9c2bdfb72ea7" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.818432 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d789f8e7467b7bc40b62e98c5f54aeca2b696eac503a0f23b1cb9c2bdfb72ea7"} err="failed to get container status \"d789f8e7467b7bc40b62e98c5f54aeca2b696eac503a0f23b1cb9c2bdfb72ea7\": rpc error: code = NotFound desc = could not find container \"d789f8e7467b7bc40b62e98c5f54aeca2b696eac503a0f23b1cb9c2bdfb72ea7\": container with ID starting with d789f8e7467b7bc40b62e98c5f54aeca2b696eac503a0f23b1cb9c2bdfb72ea7 not found: ID does not exist" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.818461 4685 scope.go:117] "RemoveContainer" containerID="c774664d8158f66a299be2ed8b5afd1d51ff7273c1cc4539a23bd86617e5e759" Jan 28 12:52:56 crc kubenswrapper[4685]: E0128 12:52:56.818788 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c774664d8158f66a299be2ed8b5afd1d51ff7273c1cc4539a23bd86617e5e759\": container with ID starting with c774664d8158f66a299be2ed8b5afd1d51ff7273c1cc4539a23bd86617e5e759 not found: ID does not 
exist" containerID="c774664d8158f66a299be2ed8b5afd1d51ff7273c1cc4539a23bd86617e5e759" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.818832 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c774664d8158f66a299be2ed8b5afd1d51ff7273c1cc4539a23bd86617e5e759"} err="failed to get container status \"c774664d8158f66a299be2ed8b5afd1d51ff7273c1cc4539a23bd86617e5e759\": rpc error: code = NotFound desc = could not find container \"c774664d8158f66a299be2ed8b5afd1d51ff7273c1cc4539a23bd86617e5e759\": container with ID starting with c774664d8158f66a299be2ed8b5afd1d51ff7273c1cc4539a23bd86617e5e759 not found: ID does not exist" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.825639 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.825689 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.831364 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:52:56 crc kubenswrapper[4685]: E0128 12:52:56.831705 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a36f2403-dd84-4f16-ae78-ee3b95f8ff13" containerName="glance-log" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.831722 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="a36f2403-dd84-4f16-ae78-ee3b95f8ff13" containerName="glance-log" Jan 28 12:52:56 crc kubenswrapper[4685]: E0128 12:52:56.831744 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a36f2403-dd84-4f16-ae78-ee3b95f8ff13" containerName="glance-httpd" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.831752 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="a36f2403-dd84-4f16-ae78-ee3b95f8ff13" containerName="glance-httpd" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.831925 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="a36f2403-dd84-4f16-ae78-ee3b95f8ff13" containerName="glance-log" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.831945 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="a36f2403-dd84-4f16-ae78-ee3b95f8ff13" containerName="glance-httpd" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.832815 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:56 crc kubenswrapper[4685]: I0128 12:52:56.858786 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.030742 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.031216 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.031242 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/239a471f-04bf-4e9a-b3d2-692026e9ca8d-logs\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.031279 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.031313 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.031333 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-run\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.031372 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-dev\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.031404 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/239a471f-04bf-4e9a-b3d2-692026e9ca8d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.031431 4685 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/239a471f-04bf-4e9a-b3d2-692026e9ca8d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.031462 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.031488 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/239a471f-04bf-4e9a-b3d2-692026e9ca8d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.031516 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.031547 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-sys\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.031576 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92d29\" (UniqueName: \"kubernetes.io/projected/239a471f-04bf-4e9a-b3d2-692026e9ca8d-kube-api-access-92d29\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.133137 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/239a471f-04bf-4e9a-b3d2-692026e9ca8d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.133209 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/239a471f-04bf-4e9a-b3d2-692026e9ca8d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.133241 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " 
pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.133268 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/239a471f-04bf-4e9a-b3d2-692026e9ca8d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.133291 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.133318 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-sys\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.133340 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92d29\" (UniqueName: \"kubernetes.io/projected/239a471f-04bf-4e9a-b3d2-692026e9ca8d-kube-api-access-92d29\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.133381 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.133401 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.133416 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/239a471f-04bf-4e9a-b3d2-692026e9ca8d-logs\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.133439 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.133460 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 
12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.133475 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-run\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.133504 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-dev\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.133571 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-dev\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.133787 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-lib-modules\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.133814 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-var-locks-brick\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.133844 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-run\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.133972 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-etc-nvme\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.134007 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-sys\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.134040 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-etc-iscsi\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.134095 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume 
\"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") device mount path \"/mnt/openstack/pv08\"" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.134108 4685 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") device mount path \"/mnt/openstack/pv02\"" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.134392 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/239a471f-04bf-4e9a-b3d2-692026e9ca8d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.134445 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/239a471f-04bf-4e9a-b3d2-692026e9ca8d-logs\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.140738 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/239a471f-04bf-4e9a-b3d2-692026e9ca8d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.148008 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/239a471f-04bf-4e9a-b3d2-692026e9ca8d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.154584 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92d29\" (UniqueName: \"kubernetes.io/projected/239a471f-04bf-4e9a-b3d2-692026e9ca8d-kube-api-access-92d29\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.155816 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.157658 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.169323 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.568280 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.771398 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"239a471f-04bf-4e9a-b3d2-692026e9ca8d","Type":"ContainerStarted","Data":"270918882c9dda7bc7522e79466e8b23b1f3f9bed5d2c9c2035914f4058d0615"} Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.771451 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"239a471f-04bf-4e9a-b3d2-692026e9ca8d","Type":"ContainerStarted","Data":"7e9072ead0f358b8dfd9939d74e430adb02ed64a66680304371051e3d6777bc0"} Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.779697 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"f76048d5-14a9-4f65-92be-5fd02e186453","Type":"ContainerStarted","Data":"d57f2b8b534368c79934594d7936f1f1e9bb22ee0f6a0f9adc9463d7706bcfe6"} Jan 28 12:52:57 crc kubenswrapper[4685]: I0128 12:52:57.815499 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-external-api-0" podStartSLOduration=2.81547701 podStartE2EDuration="2.81547701s" podCreationTimestamp="2026-01-28 12:52:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:52:57.800778843 +0000 UTC m=+1928.888192688" watchObservedRunningTime="2026-01-28 12:52:57.81547701 +0000 UTC m=+1928.902890845" Jan 28 12:52:58 crc kubenswrapper[4685]: I0128 12:52:58.556396 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a36f2403-dd84-4f16-ae78-ee3b95f8ff13" path="/var/lib/kubelet/pods/a36f2403-dd84-4f16-ae78-ee3b95f8ff13/volumes" Jan 28 12:52:58 crc kubenswrapper[4685]: I0128 12:52:58.788050 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"239a471f-04bf-4e9a-b3d2-692026e9ca8d","Type":"ContainerStarted","Data":"6657fa0c91fd5dadedd70ffd9459dddf56d2bccb8f4ef257e58154dbb1094574"} Jan 28 12:52:58 crc kubenswrapper[4685]: I0128 12:52:58.824799 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="glance-kuttl-tests/glance-default-internal-api-0" podStartSLOduration=2.824778973 podStartE2EDuration="2.824778973s" podCreationTimestamp="2026-01-28 12:52:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 12:52:58.813273086 +0000 UTC m=+1929.900686931" watchObservedRunningTime="2026-01-28 12:52:58.824778973 +0000 UTC m=+1929.912192808" Jan 28 12:53:06 crc kubenswrapper[4685]: I0128 12:53:06.134910 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:53:06 crc kubenswrapper[4685]: I0128 12:53:06.135551 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:53:06 crc kubenswrapper[4685]: I0128 12:53:06.157032 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 
12:53:06 crc kubenswrapper[4685]: I0128 12:53:06.185587 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:53:06 crc kubenswrapper[4685]: I0128 12:53:06.846678 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:53:06 crc kubenswrapper[4685]: I0128 12:53:06.846996 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:53:07 crc kubenswrapper[4685]: I0128 12:53:07.169756 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:53:07 crc kubenswrapper[4685]: I0128 12:53:07.169820 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:53:07 crc kubenswrapper[4685]: I0128 12:53:07.195190 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:53:07 crc kubenswrapper[4685]: I0128 12:53:07.207042 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:53:07 crc kubenswrapper[4685]: I0128 12:53:07.853511 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:53:07 crc kubenswrapper[4685]: I0128 12:53:07.853562 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:53:08 crc kubenswrapper[4685]: I0128 12:53:08.800765 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:53:08 crc kubenswrapper[4685]: I0128 12:53:08.859544 4685 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:53:08 crc kubenswrapper[4685]: I0128 12:53:08.921653 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:53:09 crc kubenswrapper[4685]: I0128 12:53:09.924682 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:53:09 crc kubenswrapper[4685]: I0128 12:53:09.925213 4685 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 12:53:09 crc kubenswrapper[4685]: I0128 12:53:09.983659 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:53:26 crc kubenswrapper[4685]: I0128 12:53:26.070197 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/root-account-create-update-2gt8g"] Jan 28 12:53:26 crc kubenswrapper[4685]: I0128 12:53:26.086095 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/root-account-create-update-2gt8g"] Jan 28 12:53:26 crc kubenswrapper[4685]: I0128 12:53:26.556199 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5f94ceb-23bc-4de1-b757-bfe9cefe498b" path="/var/lib/kubelet/pods/a5f94ceb-23bc-4de1-b757-bfe9cefe498b/volumes" Jan 28 12:53:49 crc kubenswrapper[4685]: I0128 12:53:49.859964 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-1"] Jan 28 12:53:49 crc 
kubenswrapper[4685]: I0128 12:53:49.860802 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-1" podUID="0a15e6ca-b616-402c-b742-2ba7acbf5763" containerName="glance-log" containerID="cri-o://d0c2248ff4107e651e4fb744e842d77f5d3c38831a2dd7b914aff1a09c429216" gracePeriod=30 Jan 28 12:53:49 crc kubenswrapper[4685]: I0128 12:53:49.863323 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-1" podUID="0a15e6ca-b616-402c-b742-2ba7acbf5763" containerName="glance-httpd" containerID="cri-o://cac4c7268b6aba2af8e2759cb23fee396f86cd2373636a86eeaf91ffca8fee77" gracePeriod=30 Jan 28 12:53:50 crc kubenswrapper[4685]: I0128 12:53:50.023739 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:53:50 crc kubenswrapper[4685]: I0128 12:53:50.024307 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-1" podUID="3f5460a2-cb8a-44aa-a4e5-44109e751133" containerName="glance-log" containerID="cri-o://135ea6a9de8a9e50de966e3781bd0d71e7c08349701ba177a29429eb98f94a14" gracePeriod=30 Jan 28 12:53:50 crc kubenswrapper[4685]: I0128 12:53:50.024403 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-1" podUID="3f5460a2-cb8a-44aa-a4e5-44109e751133" containerName="glance-httpd" containerID="cri-o://aa329771fc2f16e17d131a8613d5f5490459ff98b4f92a610b7f2913f4ccb16c" gracePeriod=30 Jan 28 12:53:50 crc kubenswrapper[4685]: I0128 12:53:50.201980 4685 generic.go:334] "Generic (PLEG): container finished" podID="0a15e6ca-b616-402c-b742-2ba7acbf5763" containerID="d0c2248ff4107e651e4fb744e842d77f5d3c38831a2dd7b914aff1a09c429216" exitCode=143 Jan 28 12:53:50 crc kubenswrapper[4685]: I0128 12:53:50.202101 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-1" event={"ID":"0a15e6ca-b616-402c-b742-2ba7acbf5763","Type":"ContainerDied","Data":"d0c2248ff4107e651e4fb744e842d77f5d3c38831a2dd7b914aff1a09c429216"} Jan 28 12:53:50 crc kubenswrapper[4685]: I0128 12:53:50.204296 4685 generic.go:334] "Generic (PLEG): container finished" podID="3f5460a2-cb8a-44aa-a4e5-44109e751133" containerID="135ea6a9de8a9e50de966e3781bd0d71e7c08349701ba177a29429eb98f94a14" exitCode=143 Jan 28 12:53:50 crc kubenswrapper[4685]: I0128 12:53:50.204336 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"3f5460a2-cb8a-44aa-a4e5-44109e751133","Type":"ContainerDied","Data":"135ea6a9de8a9e50de966e3781bd0d71e7c08349701ba177a29429eb98f94a14"} Jan 28 12:53:51 crc kubenswrapper[4685]: I0128 12:53:51.246221 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-db-sync-n8m6j"] Jan 28 12:53:51 crc kubenswrapper[4685]: I0128 12:53:51.254470 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-db-sync-n8m6j"] Jan 28 12:53:51 crc kubenswrapper[4685]: I0128 12:53:51.323737 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:53:51 crc kubenswrapper[4685]: I0128 12:53:51.324000 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-0" podUID="f76048d5-14a9-4f65-92be-5fd02e186453" 
containerName="glance-log" containerID="cri-o://965be0f4a4e8717cd5211783f16fae136683e09c3ebcf4562407b107747a7780" gracePeriod=30 Jan 28 12:53:51 crc kubenswrapper[4685]: I0128 12:53:51.324459 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-external-api-0" podUID="f76048d5-14a9-4f65-92be-5fd02e186453" containerName="glance-httpd" containerID="cri-o://d57f2b8b534368c79934594d7936f1f1e9bb22ee0f6a0f9adc9463d7706bcfe6" gracePeriod=30 Jan 28 12:53:51 crc kubenswrapper[4685]: I0128 12:53:51.347838 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["glance-kuttl-tests/glance4464-account-delete-wnc98"] Jan 28 12:53:51 crc kubenswrapper[4685]: I0128 12:53:51.349167 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance4464-account-delete-wnc98" Jan 28 12:53:51 crc kubenswrapper[4685]: I0128 12:53:51.363428 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance4464-account-delete-wnc98"] Jan 28 12:53:51 crc kubenswrapper[4685]: I0128 12:53:51.406053 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:53:51 crc kubenswrapper[4685]: I0128 12:53:51.406311 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-0" podUID="239a471f-04bf-4e9a-b3d2-692026e9ca8d" containerName="glance-log" containerID="cri-o://270918882c9dda7bc7522e79466e8b23b1f3f9bed5d2c9c2035914f4058d0615" gracePeriod=30 Jan 28 12:53:51 crc kubenswrapper[4685]: I0128 12:53:51.406419 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="glance-kuttl-tests/glance-default-internal-api-0" podUID="239a471f-04bf-4e9a-b3d2-692026e9ca8d" containerName="glance-httpd" containerID="cri-o://6657fa0c91fd5dadedd70ffd9459dddf56d2bccb8f4ef257e58154dbb1094574" gracePeriod=30 Jan 28 12:53:51 crc kubenswrapper[4685]: I0128 12:53:51.432631 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0f5e384-94e8-4a68-ae6b-8cd5e05d85df-operator-scripts\") pod \"glance4464-account-delete-wnc98\" (UID: \"b0f5e384-94e8-4a68-ae6b-8cd5e05d85df\") " pod="glance-kuttl-tests/glance4464-account-delete-wnc98" Jan 28 12:53:51 crc kubenswrapper[4685]: I0128 12:53:51.432746 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sz67x\" (UniqueName: \"kubernetes.io/projected/b0f5e384-94e8-4a68-ae6b-8cd5e05d85df-kube-api-access-sz67x\") pod \"glance4464-account-delete-wnc98\" (UID: \"b0f5e384-94e8-4a68-ae6b-8cd5e05d85df\") " pod="glance-kuttl-tests/glance4464-account-delete-wnc98" Jan 28 12:53:51 crc kubenswrapper[4685]: I0128 12:53:51.533976 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0f5e384-94e8-4a68-ae6b-8cd5e05d85df-operator-scripts\") pod \"glance4464-account-delete-wnc98\" (UID: \"b0f5e384-94e8-4a68-ae6b-8cd5e05d85df\") " pod="glance-kuttl-tests/glance4464-account-delete-wnc98" Jan 28 12:53:51 crc kubenswrapper[4685]: I0128 12:53:51.534162 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sz67x\" (UniqueName: \"kubernetes.io/projected/b0f5e384-94e8-4a68-ae6b-8cd5e05d85df-kube-api-access-sz67x\") pod \"glance4464-account-delete-wnc98\" (UID: 
\"b0f5e384-94e8-4a68-ae6b-8cd5e05d85df\") " pod="glance-kuttl-tests/glance4464-account-delete-wnc98" Jan 28 12:53:51 crc kubenswrapper[4685]: I0128 12:53:51.535368 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0f5e384-94e8-4a68-ae6b-8cd5e05d85df-operator-scripts\") pod \"glance4464-account-delete-wnc98\" (UID: \"b0f5e384-94e8-4a68-ae6b-8cd5e05d85df\") " pod="glance-kuttl-tests/glance4464-account-delete-wnc98" Jan 28 12:53:51 crc kubenswrapper[4685]: I0128 12:53:51.557036 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sz67x\" (UniqueName: \"kubernetes.io/projected/b0f5e384-94e8-4a68-ae6b-8cd5e05d85df-kube-api-access-sz67x\") pod \"glance4464-account-delete-wnc98\" (UID: \"b0f5e384-94e8-4a68-ae6b-8cd5e05d85df\") " pod="glance-kuttl-tests/glance4464-account-delete-wnc98" Jan 28 12:53:51 crc kubenswrapper[4685]: I0128 12:53:51.667377 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance4464-account-delete-wnc98" Jan 28 12:53:52 crc kubenswrapper[4685]: I0128 12:53:52.130511 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["glance-kuttl-tests/glance4464-account-delete-wnc98"] Jan 28 12:53:52 crc kubenswrapper[4685]: W0128 12:53:52.142852 4685 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb0f5e384_94e8_4a68_ae6b_8cd5e05d85df.slice/crio-6d5da8f4ddbe3e491dbff6a8ef316ba816f0b2d9b09c1a8950637b77f461561d WatchSource:0}: Error finding container 6d5da8f4ddbe3e491dbff6a8ef316ba816f0b2d9b09c1a8950637b77f461561d: Status 404 returned error can't find the container with id 6d5da8f4ddbe3e491dbff6a8ef316ba816f0b2d9b09c1a8950637b77f461561d Jan 28 12:53:52 crc kubenswrapper[4685]: I0128 12:53:52.209252 4685 scope.go:117] "RemoveContainer" containerID="3235284ae6f2d9977599382e9098d374ce32da2197b31d89beed0a2208f3108b" Jan 28 12:53:52 crc kubenswrapper[4685]: I0128 12:53:52.225539 4685 generic.go:334] "Generic (PLEG): container finished" podID="239a471f-04bf-4e9a-b3d2-692026e9ca8d" containerID="270918882c9dda7bc7522e79466e8b23b1f3f9bed5d2c9c2035914f4058d0615" exitCode=143 Jan 28 12:53:52 crc kubenswrapper[4685]: I0128 12:53:52.225931 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"239a471f-04bf-4e9a-b3d2-692026e9ca8d","Type":"ContainerDied","Data":"270918882c9dda7bc7522e79466e8b23b1f3f9bed5d2c9c2035914f4058d0615"} Jan 28 12:53:52 crc kubenswrapper[4685]: I0128 12:53:52.228215 4685 generic.go:334] "Generic (PLEG): container finished" podID="f76048d5-14a9-4f65-92be-5fd02e186453" containerID="965be0f4a4e8717cd5211783f16fae136683e09c3ebcf4562407b107747a7780" exitCode=143 Jan 28 12:53:52 crc kubenswrapper[4685]: I0128 12:53:52.228280 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"f76048d5-14a9-4f65-92be-5fd02e186453","Type":"ContainerDied","Data":"965be0f4a4e8717cd5211783f16fae136683e09c3ebcf4562407b107747a7780"} Jan 28 12:53:52 crc kubenswrapper[4685]: I0128 12:53:52.229188 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance4464-account-delete-wnc98" event={"ID":"b0f5e384-94e8-4a68-ae6b-8cd5e05d85df","Type":"ContainerStarted","Data":"6d5da8f4ddbe3e491dbff6a8ef316ba816f0b2d9b09c1a8950637b77f461561d"} Jan 28 12:53:52 crc kubenswrapper[4685]: I0128 12:53:52.268975 
4685 scope.go:117] "RemoveContainer" containerID="9bfa10d3c3c904d0a93fca1f49900e9776156d5ace85d98175464ee843aa4de7"
Jan 28 12:53:52 crc kubenswrapper[4685]: I0128 12:53:52.283359 4685 scope.go:117] "RemoveContainer" containerID="2669ef2c9e27ff4eeaabca032f749a054eaf41467eca1db998dc86e70b5665a5"
Jan 28 12:53:52 crc kubenswrapper[4685]: I0128 12:53:52.297151 4685 scope.go:117] "RemoveContainer" containerID="80963df0789d33cfb44207aa62db4d581166e71f6a8c435372b377cfcf6ab186"
Jan 28 12:53:52 crc kubenswrapper[4685]: I0128 12:53:52.555893 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c9965be-6aca-45c5-811c-16b0444568da" path="/var/lib/kubelet/pods/1c9965be-6aca-45c5-811c-16b0444568da/volumes"
Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.237525 4685 generic.go:334] "Generic (PLEG): container finished" podID="b0f5e384-94e8-4a68-ae6b-8cd5e05d85df" containerID="eed369eda4020a8f30e9d8f0a75514122366f4b4382f0ba32dd1c6a67833a260" exitCode=0
Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.237594 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance4464-account-delete-wnc98" event={"ID":"b0f5e384-94e8-4a68-ae6b-8cd5e05d85df","Type":"ContainerDied","Data":"eed369eda4020a8f30e9d8f0a75514122366f4b4382f0ba32dd1c6a67833a260"}
Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.241486 4685 generic.go:334] "Generic (PLEG): container finished" podID="0a15e6ca-b616-402c-b742-2ba7acbf5763" containerID="cac4c7268b6aba2af8e2759cb23fee396f86cd2373636a86eeaf91ffca8fee77" exitCode=0
Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.241528 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-1" event={"ID":"0a15e6ca-b616-402c-b742-2ba7acbf5763","Type":"ContainerDied","Data":"cac4c7268b6aba2af8e2759cb23fee396f86cd2373636a86eeaf91ffca8fee77"}
Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.776499 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-1"
Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.872958 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f5460a2-cb8a-44aa-a4e5-44109e751133-logs\") pod \"3f5460a2-cb8a-44aa-a4e5-44109e751133\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") "
Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873000 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gb8xk\" (UniqueName: \"kubernetes.io/projected/3f5460a2-cb8a-44aa-a4e5-44109e751133-kube-api-access-gb8xk\") pod \"3f5460a2-cb8a-44aa-a4e5-44109e751133\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") "
Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873051 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-etc-nvme\") pod \"3f5460a2-cb8a-44aa-a4e5-44109e751133\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") "
Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873081 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f5460a2-cb8a-44aa-a4e5-44109e751133-config-data\") pod \"3f5460a2-cb8a-44aa-a4e5-44109e751133\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") "
Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873128 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f5460a2-cb8a-44aa-a4e5-44109e751133-scripts\") pod \"3f5460a2-cb8a-44aa-a4e5-44109e751133\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") "
Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873199 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") pod \"3f5460a2-cb8a-44aa-a4e5-44109e751133\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") "
Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873217 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-run\") pod \"3f5460a2-cb8a-44aa-a4e5-44109e751133\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") "
Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873246 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-dev\") pod \"3f5460a2-cb8a-44aa-a4e5-44109e751133\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") "
Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873283 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3f5460a2-cb8a-44aa-a4e5-44109e751133-httpd-run\") pod \"3f5460a2-cb8a-44aa-a4e5-44109e751133\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") "
Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873297 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-sys\") pod \"3f5460a2-cb8a-44aa-a4e5-44109e751133\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") "
Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873316 4685
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-etc-iscsi\") pod \"3f5460a2-cb8a-44aa-a4e5-44109e751133\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873332 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-var-locks-brick\") pod \"3f5460a2-cb8a-44aa-a4e5-44109e751133\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873370 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-lib-modules\") pod \"3f5460a2-cb8a-44aa-a4e5-44109e751133\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873392 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"3f5460a2-cb8a-44aa-a4e5-44109e751133\" (UID: \"3f5460a2-cb8a-44aa-a4e5-44109e751133\") " Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873205 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "3f5460a2-cb8a-44aa-a4e5-44109e751133" (UID: "3f5460a2-cb8a-44aa-a4e5-44109e751133"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873364 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-run" (OuterVolumeSpecName: "run") pod "3f5460a2-cb8a-44aa-a4e5-44109e751133" (UID: "3f5460a2-cb8a-44aa-a4e5-44109e751133"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873511 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f5460a2-cb8a-44aa-a4e5-44109e751133-logs" (OuterVolumeSpecName: "logs") pod "3f5460a2-cb8a-44aa-a4e5-44109e751133" (UID: "3f5460a2-cb8a-44aa-a4e5-44109e751133"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873553 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-sys" (OuterVolumeSpecName: "sys") pod "3f5460a2-cb8a-44aa-a4e5-44109e751133" (UID: "3f5460a2-cb8a-44aa-a4e5-44109e751133"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873608 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-dev" (OuterVolumeSpecName: "dev") pod "3f5460a2-cb8a-44aa-a4e5-44109e751133" (UID: "3f5460a2-cb8a-44aa-a4e5-44109e751133"). InnerVolumeSpecName "dev". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873629 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "3f5460a2-cb8a-44aa-a4e5-44109e751133" (UID: "3f5460a2-cb8a-44aa-a4e5-44109e751133"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873651 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "3f5460a2-cb8a-44aa-a4e5-44109e751133" (UID: "3f5460a2-cb8a-44aa-a4e5-44109e751133"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873667 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "3f5460a2-cb8a-44aa-a4e5-44109e751133" (UID: "3f5460a2-cb8a-44aa-a4e5-44109e751133"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.873883 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f5460a2-cb8a-44aa-a4e5-44109e751133-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "3f5460a2-cb8a-44aa-a4e5-44109e751133" (UID: "3f5460a2-cb8a-44aa-a4e5-44109e751133"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.874267 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f5460a2-cb8a-44aa-a4e5-44109e751133-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.874286 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.874299 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.874309 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.874323 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3f5460a2-cb8a-44aa-a4e5-44109e751133-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.874334 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.874346 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 
12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.874355 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.874365 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3f5460a2-cb8a-44aa-a4e5-44109e751133-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.949880 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance-cache") pod "3f5460a2-cb8a-44aa-a4e5-44109e751133" (UID: "3f5460a2-cb8a-44aa-a4e5-44109e751133"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.949912 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage14-crc" (OuterVolumeSpecName: "glance") pod "3f5460a2-cb8a-44aa-a4e5-44109e751133" (UID: "3f5460a2-cb8a-44aa-a4e5-44109e751133"). InnerVolumeSpecName "local-storage14-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.949881 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f5460a2-cb8a-44aa-a4e5-44109e751133-scripts" (OuterVolumeSpecName: "scripts") pod "3f5460a2-cb8a-44aa-a4e5-44109e751133" (UID: "3f5460a2-cb8a-44aa-a4e5-44109e751133"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.950060 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f5460a2-cb8a-44aa-a4e5-44109e751133-kube-api-access-gb8xk" (OuterVolumeSpecName: "kube-api-access-gb8xk") pod "3f5460a2-cb8a-44aa-a4e5-44109e751133" (UID: "3f5460a2-cb8a-44aa-a4e5-44109e751133"). InnerVolumeSpecName "kube-api-access-gb8xk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.975827 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") on node \"crc\" " Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.975862 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.975877 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gb8xk\" (UniqueName: \"kubernetes.io/projected/3f5460a2-cb8a-44aa-a4e5-44109e751133-kube-api-access-gb8xk\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.975889 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f5460a2-cb8a-44aa-a4e5-44109e751133-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:53 crc kubenswrapper[4685]: I0128 12:53:53.978463 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f5460a2-cb8a-44aa-a4e5-44109e751133-config-data" (OuterVolumeSpecName: "config-data") pod "3f5460a2-cb8a-44aa-a4e5-44109e751133" (UID: "3f5460a2-cb8a-44aa-a4e5-44109e751133"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.001101 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage14-crc" (UniqueName: "kubernetes.io/local-volume/local-storage14-crc") on node "crc" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.017400 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.081644 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.081688 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f5460a2-cb8a-44aa-a4e5-44109e751133-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.081703 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage14-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage14-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.108029 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.182436 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") pod \"0a15e6ca-b616-402c-b742-2ba7acbf5763\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.182510 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0a15e6ca-b616-402c-b742-2ba7acbf5763-httpd-run\") pod \"0a15e6ca-b616-402c-b742-2ba7acbf5763\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.182534 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a15e6ca-b616-402c-b742-2ba7acbf5763-scripts\") pod \"0a15e6ca-b616-402c-b742-2ba7acbf5763\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.182560 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-lib-modules\") pod \"0a15e6ca-b616-402c-b742-2ba7acbf5763\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.182587 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-etc-iscsi\") pod \"0a15e6ca-b616-402c-b742-2ba7acbf5763\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.182610 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a15e6ca-b616-402c-b742-2ba7acbf5763-logs\") pod \"0a15e6ca-b616-402c-b742-2ba7acbf5763\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.182647 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qqmcz\" (UniqueName: \"kubernetes.io/projected/0a15e6ca-b616-402c-b742-2ba7acbf5763-kube-api-access-qqmcz\") pod \"0a15e6ca-b616-402c-b742-2ba7acbf5763\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.182672 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-var-locks-brick\") pod \"0a15e6ca-b616-402c-b742-2ba7acbf5763\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.182697 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-sys\") pod \"0a15e6ca-b616-402c-b742-2ba7acbf5763\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.182774 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-dev\") pod \"0a15e6ca-b616-402c-b742-2ba7acbf5763\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.182790 
4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") pod \"0a15e6ca-b616-402c-b742-2ba7acbf5763\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.182806 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-run\") pod \"0a15e6ca-b616-402c-b742-2ba7acbf5763\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.182823 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-etc-nvme\") pod \"0a15e6ca-b616-402c-b742-2ba7acbf5763\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.182816 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a15e6ca-b616-402c-b742-2ba7acbf5763-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "0a15e6ca-b616-402c-b742-2ba7acbf5763" (UID: "0a15e6ca-b616-402c-b742-2ba7acbf5763"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.182843 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a15e6ca-b616-402c-b742-2ba7acbf5763-config-data\") pod \"0a15e6ca-b616-402c-b742-2ba7acbf5763\" (UID: \"0a15e6ca-b616-402c-b742-2ba7acbf5763\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.182875 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "0a15e6ca-b616-402c-b742-2ba7acbf5763" (UID: "0a15e6ca-b616-402c-b742-2ba7acbf5763"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.183086 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.183097 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0a15e6ca-b616-402c-b742-2ba7acbf5763-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.183343 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "0a15e6ca-b616-402c-b742-2ba7acbf5763" (UID: "0a15e6ca-b616-402c-b742-2ba7acbf5763"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.183384 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-dev" (OuterVolumeSpecName: "dev") pod "0a15e6ca-b616-402c-b742-2ba7acbf5763" (UID: "0a15e6ca-b616-402c-b742-2ba7acbf5763"). InnerVolumeSpecName "dev". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.183413 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-run" (OuterVolumeSpecName: "run") pod "0a15e6ca-b616-402c-b742-2ba7acbf5763" (UID: "0a15e6ca-b616-402c-b742-2ba7acbf5763"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.183432 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "0a15e6ca-b616-402c-b742-2ba7acbf5763" (UID: "0a15e6ca-b616-402c-b742-2ba7acbf5763"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.183424 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "0a15e6ca-b616-402c-b742-2ba7acbf5763" (UID: "0a15e6ca-b616-402c-b742-2ba7acbf5763"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.183351 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-sys" (OuterVolumeSpecName: "sys") pod "0a15e6ca-b616-402c-b742-2ba7acbf5763" (UID: "0a15e6ca-b616-402c-b742-2ba7acbf5763"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.183671 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a15e6ca-b616-402c-b742-2ba7acbf5763-logs" (OuterVolumeSpecName: "logs") pod "0a15e6ca-b616-402c-b742-2ba7acbf5763" (UID: "0a15e6ca-b616-402c-b742-2ba7acbf5763"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.186452 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage13-crc" (OuterVolumeSpecName: "glance-cache") pod "0a15e6ca-b616-402c-b742-2ba7acbf5763" (UID: "0a15e6ca-b616-402c-b742-2ba7acbf5763"). InnerVolumeSpecName "local-storage13-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.186516 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage17-crc" (OuterVolumeSpecName: "glance") pod "0a15e6ca-b616-402c-b742-2ba7acbf5763" (UID: "0a15e6ca-b616-402c-b742-2ba7acbf5763"). InnerVolumeSpecName "local-storage17-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.186555 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a15e6ca-b616-402c-b742-2ba7acbf5763-scripts" (OuterVolumeSpecName: "scripts") pod "0a15e6ca-b616-402c-b742-2ba7acbf5763" (UID: "0a15e6ca-b616-402c-b742-2ba7acbf5763"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.187017 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a15e6ca-b616-402c-b742-2ba7acbf5763-kube-api-access-qqmcz" (OuterVolumeSpecName: "kube-api-access-qqmcz") pod "0a15e6ca-b616-402c-b742-2ba7acbf5763" (UID: "0a15e6ca-b616-402c-b742-2ba7acbf5763"). InnerVolumeSpecName "kube-api-access-qqmcz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.215980 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a15e6ca-b616-402c-b742-2ba7acbf5763-config-data" (OuterVolumeSpecName: "config-data") pod "0a15e6ca-b616-402c-b742-2ba7acbf5763" (UID: "0a15e6ca-b616-402c-b742-2ba7acbf5763"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.254138 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-1" event={"ID":"0a15e6ca-b616-402c-b742-2ba7acbf5763","Type":"ContainerDied","Data":"cf29335d16e1628e96760a4a8f389956a74bd135959a1a2752b3a4ef962a182c"} Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.254211 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-1" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.254536 4685 scope.go:117] "RemoveContainer" containerID="cac4c7268b6aba2af8e2759cb23fee396f86cd2373636a86eeaf91ffca8fee77" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.259377 4685 generic.go:334] "Generic (PLEG): container finished" podID="3f5460a2-cb8a-44aa-a4e5-44109e751133" containerID="aa329771fc2f16e17d131a8613d5f5490459ff98b4f92a610b7f2913f4ccb16c" exitCode=0 Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.259613 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-1" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.260289 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"3f5460a2-cb8a-44aa-a4e5-44109e751133","Type":"ContainerDied","Data":"aa329771fc2f16e17d131a8613d5f5490459ff98b4f92a610b7f2913f4ccb16c"} Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.260351 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-1" event={"ID":"3f5460a2-cb8a-44aa-a4e5-44109e751133","Type":"ContainerDied","Data":"34a1c1ffaf93da6206173eef156fd626eb5d3710d0c3dea62f78483ff9069650"} Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.277937 4685 scope.go:117] "RemoveContainer" containerID="d0c2248ff4107e651e4fb744e842d77f5d3c38831a2dd7b914aff1a09c429216" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.284231 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a15e6ca-b616-402c-b742-2ba7acbf5763-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.284250 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qqmcz\" (UniqueName: \"kubernetes.io/projected/0a15e6ca-b616-402c-b742-2ba7acbf5763-kube-api-access-qqmcz\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.284262 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.284273 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.284309 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage13-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") on node \"crc\" " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.284322 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.284333 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.284341 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a15e6ca-b616-402c-b742-2ba7acbf5763-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.284354 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") on node \"crc\" " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.284362 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a15e6ca-b616-402c-b742-2ba7acbf5763-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.284371 4685 reconciler_common.go:293] "Volume detached 
for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.284380 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/0a15e6ca-b616-402c-b742-2ba7acbf5763-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.297626 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-1"] Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.305479 4685 scope.go:117] "RemoveContainer" containerID="aa329771fc2f16e17d131a8613d5f5490459ff98b4f92a610b7f2913f4ccb16c" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.305605 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-1"] Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.307526 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage17-crc" (UniqueName: "kubernetes.io/local-volume/local-storage17-crc") on node "crc" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.310215 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage13-crc" (UniqueName: "kubernetes.io/local-volume/local-storage13-crc") on node "crc" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.316324 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.324206 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-1"] Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.330667 4685 scope.go:117] "RemoveContainer" containerID="135ea6a9de8a9e50de966e3781bd0d71e7c08349701ba177a29429eb98f94a14" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.347254 4685 scope.go:117] "RemoveContainer" containerID="aa329771fc2f16e17d131a8613d5f5490459ff98b4f92a610b7f2913f4ccb16c" Jan 28 12:53:54 crc kubenswrapper[4685]: E0128 12:53:54.347609 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa329771fc2f16e17d131a8613d5f5490459ff98b4f92a610b7f2913f4ccb16c\": container with ID starting with aa329771fc2f16e17d131a8613d5f5490459ff98b4f92a610b7f2913f4ccb16c not found: ID does not exist" containerID="aa329771fc2f16e17d131a8613d5f5490459ff98b4f92a610b7f2913f4ccb16c" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.347644 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa329771fc2f16e17d131a8613d5f5490459ff98b4f92a610b7f2913f4ccb16c"} err="failed to get container status \"aa329771fc2f16e17d131a8613d5f5490459ff98b4f92a610b7f2913f4ccb16c\": rpc error: code = NotFound desc = could not find container \"aa329771fc2f16e17d131a8613d5f5490459ff98b4f92a610b7f2913f4ccb16c\": container with ID starting with aa329771fc2f16e17d131a8613d5f5490459ff98b4f92a610b7f2913f4ccb16c not found: ID does not exist" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.347666 4685 scope.go:117] "RemoveContainer" containerID="135ea6a9de8a9e50de966e3781bd0d71e7c08349701ba177a29429eb98f94a14" Jan 28 12:53:54 crc kubenswrapper[4685]: E0128 12:53:54.347895 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"135ea6a9de8a9e50de966e3781bd0d71e7c08349701ba177a29429eb98f94a14\": container with ID starting with 135ea6a9de8a9e50de966e3781bd0d71e7c08349701ba177a29429eb98f94a14 not found: ID does not exist" containerID="135ea6a9de8a9e50de966e3781bd0d71e7c08349701ba177a29429eb98f94a14" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.347924 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"135ea6a9de8a9e50de966e3781bd0d71e7c08349701ba177a29429eb98f94a14"} err="failed to get container status \"135ea6a9de8a9e50de966e3781bd0d71e7c08349701ba177a29429eb98f94a14\": rpc error: code = NotFound desc = could not find container \"135ea6a9de8a9e50de966e3781bd0d71e7c08349701ba177a29429eb98f94a14\": container with ID starting with 135ea6a9de8a9e50de966e3781bd0d71e7c08349701ba177a29429eb98f94a14 not found: ID does not exist" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.385567 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage17-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage17-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.385608 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage13-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage13-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.506695 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance4464-account-delete-wnc98" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.580510 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a15e6ca-b616-402c-b742-2ba7acbf5763" path="/var/lib/kubelet/pods/0a15e6ca-b616-402c-b742-2ba7acbf5763/volumes" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.581358 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f5460a2-cb8a-44aa-a4e5-44109e751133" path="/var/lib/kubelet/pods/3f5460a2-cb8a-44aa-a4e5-44109e751133/volumes" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.588374 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0f5e384-94e8-4a68-ae6b-8cd5e05d85df-operator-scripts\") pod \"b0f5e384-94e8-4a68-ae6b-8cd5e05d85df\" (UID: \"b0f5e384-94e8-4a68-ae6b-8cd5e05d85df\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.588582 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sz67x\" (UniqueName: \"kubernetes.io/projected/b0f5e384-94e8-4a68-ae6b-8cd5e05d85df-kube-api-access-sz67x\") pod \"b0f5e384-94e8-4a68-ae6b-8cd5e05d85df\" (UID: \"b0f5e384-94e8-4a68-ae6b-8cd5e05d85df\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.589206 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0f5e384-94e8-4a68-ae6b-8cd5e05d85df-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b0f5e384-94e8-4a68-ae6b-8cd5e05d85df" (UID: "b0f5e384-94e8-4a68-ae6b-8cd5e05d85df"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.594391 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0f5e384-94e8-4a68-ae6b-8cd5e05d85df-kube-api-access-sz67x" (OuterVolumeSpecName: "kube-api-access-sz67x") pod "b0f5e384-94e8-4a68-ae6b-8cd5e05d85df" (UID: "b0f5e384-94e8-4a68-ae6b-8cd5e05d85df"). InnerVolumeSpecName "kube-api-access-sz67x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.690897 4685 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0f5e384-94e8-4a68-ae6b-8cd5e05d85df-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.690945 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sz67x\" (UniqueName: \"kubernetes.io/projected/b0f5e384-94e8-4a68-ae6b-8cd5e05d85df-kube-api-access-sz67x\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.876630 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.893802 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-etc-iscsi\") pod \"f76048d5-14a9-4f65-92be-5fd02e186453\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.893853 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f76048d5-14a9-4f65-92be-5fd02e186453-scripts\") pod \"f76048d5-14a9-4f65-92be-5fd02e186453\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.893899 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f76048d5-14a9-4f65-92be-5fd02e186453-logs\") pod \"f76048d5-14a9-4f65-92be-5fd02e186453\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.893921 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "f76048d5-14a9-4f65-92be-5fd02e186453" (UID: "f76048d5-14a9-4f65-92be-5fd02e186453"). InnerVolumeSpecName "etc-iscsi". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.893977 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-var-locks-brick\") pod \"f76048d5-14a9-4f65-92be-5fd02e186453\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.894009 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-sys\") pod \"f76048d5-14a9-4f65-92be-5fd02e186453\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.894029 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-dev\") pod \"f76048d5-14a9-4f65-92be-5fd02e186453\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.894071 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-sys" (OuterVolumeSpecName: "sys") pod "f76048d5-14a9-4f65-92be-5fd02e186453" (UID: "f76048d5-14a9-4f65-92be-5fd02e186453"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.894100 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-run\") pod \"f76048d5-14a9-4f65-92be-5fd02e186453\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.894093 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "f76048d5-14a9-4f65-92be-5fd02e186453" (UID: "f76048d5-14a9-4f65-92be-5fd02e186453"). InnerVolumeSpecName "var-locks-brick". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.894153 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-run" (OuterVolumeSpecName: "run") pod "f76048d5-14a9-4f65-92be-5fd02e186453" (UID: "f76048d5-14a9-4f65-92be-5fd02e186453"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.894163 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-dev" (OuterVolumeSpecName: "dev") pod "f76048d5-14a9-4f65-92be-5fd02e186453" (UID: "f76048d5-14a9-4f65-92be-5fd02e186453"). InnerVolumeSpecName "dev". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.894219 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jtr82\" (UniqueName: \"kubernetes.io/projected/f76048d5-14a9-4f65-92be-5fd02e186453-kube-api-access-jtr82\") pod \"f76048d5-14a9-4f65-92be-5fd02e186453\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.894243 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-etc-nvme\") pod \"f76048d5-14a9-4f65-92be-5fd02e186453\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.894281 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f76048d5-14a9-4f65-92be-5fd02e186453-config-data\") pod \"f76048d5-14a9-4f65-92be-5fd02e186453\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.894355 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "f76048d5-14a9-4f65-92be-5fd02e186453" (UID: "f76048d5-14a9-4f65-92be-5fd02e186453"). InnerVolumeSpecName "etc-nvme". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.894313 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"f76048d5-14a9-4f65-92be-5fd02e186453\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.894397 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"f76048d5-14a9-4f65-92be-5fd02e186453\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.894749 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "f76048d5-14a9-4f65-92be-5fd02e186453" (UID: "f76048d5-14a9-4f65-92be-5fd02e186453"). InnerVolumeSpecName "lib-modules". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.894908 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f76048d5-14a9-4f65-92be-5fd02e186453-logs" (OuterVolumeSpecName: "logs") pod "f76048d5-14a9-4f65-92be-5fd02e186453" (UID: "f76048d5-14a9-4f65-92be-5fd02e186453"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.894918 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-lib-modules\") pod \"f76048d5-14a9-4f65-92be-5fd02e186453\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.894973 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f76048d5-14a9-4f65-92be-5fd02e186453-httpd-run\") pod \"f76048d5-14a9-4f65-92be-5fd02e186453\" (UID: \"f76048d5-14a9-4f65-92be-5fd02e186453\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.895471 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.895495 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f76048d5-14a9-4f65-92be-5fd02e186453-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.895507 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.895521 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.895531 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.895541 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.895551 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.895561 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f76048d5-14a9-4f65-92be-5fd02e186453-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.895795 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f76048d5-14a9-4f65-92be-5fd02e186453-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "f76048d5-14a9-4f65-92be-5fd02e186453" (UID: "f76048d5-14a9-4f65-92be-5fd02e186453"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.898099 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance-cache") pod "f76048d5-14a9-4f65-92be-5fd02e186453" (UID: "f76048d5-14a9-4f65-92be-5fd02e186453"). 
InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.898127 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f76048d5-14a9-4f65-92be-5fd02e186453-scripts" (OuterVolumeSpecName: "scripts") pod "f76048d5-14a9-4f65-92be-5fd02e186453" (UID: "f76048d5-14a9-4f65-92be-5fd02e186453"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.898139 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "f76048d5-14a9-4f65-92be-5fd02e186453" (UID: "f76048d5-14a9-4f65-92be-5fd02e186453"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.898250 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f76048d5-14a9-4f65-92be-5fd02e186453-kube-api-access-jtr82" (OuterVolumeSpecName: "kube-api-access-jtr82") pod "f76048d5-14a9-4f65-92be-5fd02e186453" (UID: "f76048d5-14a9-4f65-92be-5fd02e186453"). InnerVolumeSpecName "kube-api-access-jtr82". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.939080 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f76048d5-14a9-4f65-92be-5fd02e186453-config-data" (OuterVolumeSpecName: "config-data") pod "f76048d5-14a9-4f65-92be-5fd02e186453" (UID: "f76048d5-14a9-4f65-92be-5fd02e186453"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.963640 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997002 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-lib-modules\") pod \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997067 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-dev\") pod \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997097 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-sys\") pod \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997120 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-run\") pod \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997138 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997134 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-lib-modules" (OuterVolumeSpecName: "lib-modules") pod "239a471f-04bf-4e9a-b3d2-692026e9ca8d" (UID: "239a471f-04bf-4e9a-b3d2-692026e9ca8d"). InnerVolumeSpecName "lib-modules". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997162 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/239a471f-04bf-4e9a-b3d2-692026e9ca8d-logs\") pod \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997197 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-etc-iscsi\") pod \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997220 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-etc-nvme\") pod \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997250 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance-cache\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997249 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-sys" (OuterVolumeSpecName: "sys") pod "239a471f-04bf-4e9a-b3d2-692026e9ca8d" (UID: "239a471f-04bf-4e9a-b3d2-692026e9ca8d"). InnerVolumeSpecName "sys". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997269 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-dev" (OuterVolumeSpecName: "dev") pod "239a471f-04bf-4e9a-b3d2-692026e9ca8d" (UID: "239a471f-04bf-4e9a-b3d2-692026e9ca8d"). InnerVolumeSpecName "dev". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997280 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-etc-iscsi" (OuterVolumeSpecName: "etc-iscsi") pod "239a471f-04bf-4e9a-b3d2-692026e9ca8d" (UID: "239a471f-04bf-4e9a-b3d2-692026e9ca8d"). InnerVolumeSpecName "etc-iscsi". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997311 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-etc-nvme" (OuterVolumeSpecName: "etc-nvme") pod "239a471f-04bf-4e9a-b3d2-692026e9ca8d" (UID: "239a471f-04bf-4e9a-b3d2-692026e9ca8d"). InnerVolumeSpecName "etc-nvme". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997286 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/239a471f-04bf-4e9a-b3d2-692026e9ca8d-httpd-run\") pod \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997340 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-run" (OuterVolumeSpecName: "run") pod "239a471f-04bf-4e9a-b3d2-692026e9ca8d" (UID: "239a471f-04bf-4e9a-b3d2-692026e9ca8d"). InnerVolumeSpecName "run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997444 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92d29\" (UniqueName: \"kubernetes.io/projected/239a471f-04bf-4e9a-b3d2-692026e9ca8d-kube-api-access-92d29\") pod \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997480 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-var-locks-brick\") pod \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997501 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/239a471f-04bf-4e9a-b3d2-692026e9ca8d-logs" (OuterVolumeSpecName: "logs") pod "239a471f-04bf-4e9a-b3d2-692026e9ca8d" (UID: "239a471f-04bf-4e9a-b3d2-692026e9ca8d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997556 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/239a471f-04bf-4e9a-b3d2-692026e9ca8d-scripts\") pod \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997589 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/239a471f-04bf-4e9a-b3d2-692026e9ca8d-config-data\") pod \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\" (UID: \"239a471f-04bf-4e9a-b3d2-692026e9ca8d\") " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997663 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/239a471f-04bf-4e9a-b3d2-692026e9ca8d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "239a471f-04bf-4e9a-b3d2-692026e9ca8d" (UID: "239a471f-04bf-4e9a-b3d2-692026e9ca8d"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.997710 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-var-locks-brick" (OuterVolumeSpecName: "var-locks-brick") pod "239a471f-04bf-4e9a-b3d2-692026e9ca8d" (UID: "239a471f-04bf-4e9a-b3d2-692026e9ca8d"). InnerVolumeSpecName "var-locks-brick". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.998144 4685 reconciler_common.go:293] "Volume detached for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-lib-modules\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.998164 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f76048d5-14a9-4f65-92be-5fd02e186453-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.998193 4685 reconciler_common.go:293] "Volume detached for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-dev\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.998202 4685 reconciler_common.go:293] "Volume detached for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-sys\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.998210 4685 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.998220 4685 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/239a471f-04bf-4e9a-b3d2-692026e9ca8d-logs\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.998228 4685 reconciler_common.go:293] "Volume detached for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-etc-iscsi\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.998236 4685 reconciler_common.go:293] "Volume detached for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-etc-nvme\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.998245 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/239a471f-04bf-4e9a-b3d2-692026e9ca8d-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.998268 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtr82\" (UniqueName: \"kubernetes.io/projected/f76048d5-14a9-4f65-92be-5fd02e186453-kube-api-access-jtr82\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.998280 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f76048d5-14a9-4f65-92be-5fd02e186453-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.998299 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.998312 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.998322 4685 reconciler_common.go:293] "Volume detached for volume \"var-locks-brick\" (UniqueName: 
\"kubernetes.io/host-path/239a471f-04bf-4e9a-b3d2-692026e9ca8d-var-locks-brick\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:54 crc kubenswrapper[4685]: I0128 12:53:54.998344 4685 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f76048d5-14a9-4f65-92be-5fd02e186453-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.013130 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.013373 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.027908 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance-cache") pod "239a471f-04bf-4e9a-b3d2-692026e9ca8d" (UID: "239a471f-04bf-4e9a-b3d2-692026e9ca8d"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.028231 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "239a471f-04bf-4e9a-b3d2-692026e9ca8d" (UID: "239a471f-04bf-4e9a-b3d2-692026e9ca8d"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.028375 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/239a471f-04bf-4e9a-b3d2-692026e9ca8d-scripts" (OuterVolumeSpecName: "scripts") pod "239a471f-04bf-4e9a-b3d2-692026e9ca8d" (UID: "239a471f-04bf-4e9a-b3d2-692026e9ca8d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.028615 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/239a471f-04bf-4e9a-b3d2-692026e9ca8d-kube-api-access-92d29" (OuterVolumeSpecName: "kube-api-access-92d29") pod "239a471f-04bf-4e9a-b3d2-692026e9ca8d" (UID: "239a471f-04bf-4e9a-b3d2-692026e9ca8d"). InnerVolumeSpecName "kube-api-access-92d29". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.057013 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/239a471f-04bf-4e9a-b3d2-692026e9ca8d-config-data" (OuterVolumeSpecName: "config-data") pod "239a471f-04bf-4e9a-b3d2-692026e9ca8d" (UID: "239a471f-04bf-4e9a-b3d2-692026e9ca8d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.099636 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92d29\" (UniqueName: \"kubernetes.io/projected/239a471f-04bf-4e9a-b3d2-692026e9ca8d-kube-api-access-92d29\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.099666 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.099676 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.099684 4685 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/239a471f-04bf-4e9a-b3d2-692026e9ca8d-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.099692 4685 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/239a471f-04bf-4e9a-b3d2-692026e9ca8d-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.099720 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.099733 4685 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.112047 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.112719 4685 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.201448 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.201478 4685 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.268689 4685 generic.go:334] "Generic (PLEG): container finished" podID="239a471f-04bf-4e9a-b3d2-692026e9ca8d" containerID="6657fa0c91fd5dadedd70ffd9459dddf56d2bccb8f4ef257e58154dbb1094574" exitCode=0 Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.268760 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" event={"ID":"239a471f-04bf-4e9a-b3d2-692026e9ca8d","Type":"ContainerDied","Data":"6657fa0c91fd5dadedd70ffd9459dddf56d2bccb8f4ef257e58154dbb1094574"} Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.268799 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-internal-api-0" 
event={"ID":"239a471f-04bf-4e9a-b3d2-692026e9ca8d","Type":"ContainerDied","Data":"7e9072ead0f358b8dfd9939d74e430adb02ed64a66680304371051e3d6777bc0"} Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.268813 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-internal-api-0" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.268821 4685 scope.go:117] "RemoveContainer" containerID="6657fa0c91fd5dadedd70ffd9459dddf56d2bccb8f4ef257e58154dbb1094574" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.270753 4685 generic.go:334] "Generic (PLEG): container finished" podID="f76048d5-14a9-4f65-92be-5fd02e186453" containerID="d57f2b8b534368c79934594d7936f1f1e9bb22ee0f6a0f9adc9463d7706bcfe6" exitCode=0 Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.270802 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"f76048d5-14a9-4f65-92be-5fd02e186453","Type":"ContainerDied","Data":"d57f2b8b534368c79934594d7936f1f1e9bb22ee0f6a0f9adc9463d7706bcfe6"} Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.270825 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance-default-external-api-0" event={"ID":"f76048d5-14a9-4f65-92be-5fd02e186453","Type":"ContainerDied","Data":"c3cc5582d879fad77db0275a9bb6b466ec48ed0bd80ac874529fd92317eebf1b"} Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.270884 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="glance-kuttl-tests/glance-default-external-api-0" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.272738 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="glance-kuttl-tests/glance4464-account-delete-wnc98" event={"ID":"b0f5e384-94e8-4a68-ae6b-8cd5e05d85df","Type":"ContainerDied","Data":"6d5da8f4ddbe3e491dbff6a8ef316ba816f0b2d9b09c1a8950637b77f461561d"} Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.272767 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d5da8f4ddbe3e491dbff6a8ef316ba816f0b2d9b09c1a8950637b77f461561d" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.272806 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="glance-kuttl-tests/glance4464-account-delete-wnc98" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.291746 4685 scope.go:117] "RemoveContainer" containerID="270918882c9dda7bc7522e79466e8b23b1f3f9bed5d2c9c2035914f4058d0615" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.302105 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.308045 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-default-internal-api-0"] Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.316818 4685 scope.go:117] "RemoveContainer" containerID="6657fa0c91fd5dadedd70ffd9459dddf56d2bccb8f4ef257e58154dbb1094574" Jan 28 12:53:55 crc kubenswrapper[4685]: E0128 12:53:55.317366 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6657fa0c91fd5dadedd70ffd9459dddf56d2bccb8f4ef257e58154dbb1094574\": container with ID starting with 6657fa0c91fd5dadedd70ffd9459dddf56d2bccb8f4ef257e58154dbb1094574 not found: ID does not exist" containerID="6657fa0c91fd5dadedd70ffd9459dddf56d2bccb8f4ef257e58154dbb1094574" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.317397 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6657fa0c91fd5dadedd70ffd9459dddf56d2bccb8f4ef257e58154dbb1094574"} err="failed to get container status \"6657fa0c91fd5dadedd70ffd9459dddf56d2bccb8f4ef257e58154dbb1094574\": rpc error: code = NotFound desc = could not find container \"6657fa0c91fd5dadedd70ffd9459dddf56d2bccb8f4ef257e58154dbb1094574\": container with ID starting with 6657fa0c91fd5dadedd70ffd9459dddf56d2bccb8f4ef257e58154dbb1094574 not found: ID does not exist" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.317426 4685 scope.go:117] "RemoveContainer" containerID="270918882c9dda7bc7522e79466e8b23b1f3f9bed5d2c9c2035914f4058d0615" Jan 28 12:53:55 crc kubenswrapper[4685]: E0128 12:53:55.317764 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"270918882c9dda7bc7522e79466e8b23b1f3f9bed5d2c9c2035914f4058d0615\": container with ID starting with 270918882c9dda7bc7522e79466e8b23b1f3f9bed5d2c9c2035914f4058d0615 not found: ID does not exist" containerID="270918882c9dda7bc7522e79466e8b23b1f3f9bed5d2c9c2035914f4058d0615" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.317805 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"270918882c9dda7bc7522e79466e8b23b1f3f9bed5d2c9c2035914f4058d0615"} err="failed to get container status \"270918882c9dda7bc7522e79466e8b23b1f3f9bed5d2c9c2035914f4058d0615\": rpc error: code = NotFound desc = could not find container \"270918882c9dda7bc7522e79466e8b23b1f3f9bed5d2c9c2035914f4058d0615\": container with ID starting with 270918882c9dda7bc7522e79466e8b23b1f3f9bed5d2c9c2035914f4058d0615 not found: ID does not exist" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.317833 4685 scope.go:117] "RemoveContainer" containerID="d57f2b8b534368c79934594d7936f1f1e9bb22ee0f6a0f9adc9463d7706bcfe6" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.332657 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.341094 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["glance-kuttl-tests/glance-default-external-api-0"] Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.347799 4685 scope.go:117] "RemoveContainer" containerID="965be0f4a4e8717cd5211783f16fae136683e09c3ebcf4562407b107747a7780" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.364745 4685 scope.go:117] "RemoveContainer" containerID="d57f2b8b534368c79934594d7936f1f1e9bb22ee0f6a0f9adc9463d7706bcfe6" Jan 28 12:53:55 crc kubenswrapper[4685]: E0128 12:53:55.365091 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d57f2b8b534368c79934594d7936f1f1e9bb22ee0f6a0f9adc9463d7706bcfe6\": container with ID starting with d57f2b8b534368c79934594d7936f1f1e9bb22ee0f6a0f9adc9463d7706bcfe6 not found: ID does not exist" containerID="d57f2b8b534368c79934594d7936f1f1e9bb22ee0f6a0f9adc9463d7706bcfe6" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.365124 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d57f2b8b534368c79934594d7936f1f1e9bb22ee0f6a0f9adc9463d7706bcfe6"} err="failed to get container status \"d57f2b8b534368c79934594d7936f1f1e9bb22ee0f6a0f9adc9463d7706bcfe6\": rpc error: code = NotFound desc = could not find container \"d57f2b8b534368c79934594d7936f1f1e9bb22ee0f6a0f9adc9463d7706bcfe6\": container with ID starting with d57f2b8b534368c79934594d7936f1f1e9bb22ee0f6a0f9adc9463d7706bcfe6 not found: ID does not exist" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.365144 4685 scope.go:117] "RemoveContainer" containerID="965be0f4a4e8717cd5211783f16fae136683e09c3ebcf4562407b107747a7780" Jan 28 12:53:55 crc kubenswrapper[4685]: E0128 12:53:55.365597 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"965be0f4a4e8717cd5211783f16fae136683e09c3ebcf4562407b107747a7780\": container with ID starting with 965be0f4a4e8717cd5211783f16fae136683e09c3ebcf4562407b107747a7780 not found: ID does not exist" containerID="965be0f4a4e8717cd5211783f16fae136683e09c3ebcf4562407b107747a7780" Jan 28 12:53:55 crc kubenswrapper[4685]: I0128 12:53:55.365635 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"965be0f4a4e8717cd5211783f16fae136683e09c3ebcf4562407b107747a7780"} err="failed to get container status \"965be0f4a4e8717cd5211783f16fae136683e09c3ebcf4562407b107747a7780\": rpc error: code = NotFound desc = could not find container \"965be0f4a4e8717cd5211783f16fae136683e09c3ebcf4562407b107747a7780\": container with ID starting with 965be0f4a4e8717cd5211783f16fae136683e09c3ebcf4562407b107747a7780 not found: ID does not exist" Jan 28 12:53:56 crc kubenswrapper[4685]: I0128 12:53:56.344030 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-db-create-d86jc"] Jan 28 12:53:56 crc kubenswrapper[4685]: I0128 12:53:56.350197 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-db-create-d86jc"] Jan 28 12:53:56 crc kubenswrapper[4685]: I0128 12:53:56.359527 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance4464-account-delete-wnc98"] Jan 28 12:53:56 crc kubenswrapper[4685]: I0128 12:53:56.364934 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/glance-4464-account-create-update-2tch8"] Jan 28 12:53:56 crc kubenswrapper[4685]: I0128 12:53:56.370190 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["glance-kuttl-tests/glance4464-account-delete-wnc98"] Jan 28 12:53:56 crc kubenswrapper[4685]: I0128 12:53:56.375181 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/glance-4464-account-create-update-2tch8"] Jan 28 12:53:56 crc kubenswrapper[4685]: I0128 12:53:56.558984 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="239a471f-04bf-4e9a-b3d2-692026e9ca8d" path="/var/lib/kubelet/pods/239a471f-04bf-4e9a-b3d2-692026e9ca8d/volumes" Jan 28 12:53:56 crc kubenswrapper[4685]: I0128 12:53:56.559729 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78ca937e-35ea-48eb-b659-705a105a7578" path="/var/lib/kubelet/pods/78ca937e-35ea-48eb-b659-705a105a7578/volumes" Jan 28 12:53:56 crc kubenswrapper[4685]: I0128 12:53:56.560495 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c" path="/var/lib/kubelet/pods/ad1794ca-ecfc-4d90-bcf7-aa5bcda7f44c/volumes" Jan 28 12:53:56 crc kubenswrapper[4685]: I0128 12:53:56.561801 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0f5e384-94e8-4a68-ae6b-8cd5e05d85df" path="/var/lib/kubelet/pods/b0f5e384-94e8-4a68-ae6b-8cd5e05d85df/volumes" Jan 28 12:53:56 crc kubenswrapper[4685]: I0128 12:53:56.562543 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f76048d5-14a9-4f65-92be-5fd02e186453" path="/var/lib/kubelet/pods/f76048d5-14a9-4f65-92be-5fd02e186453/volumes" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.884231 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-wqzs5/must-gather-cpdsl"] Jan 28 12:54:20 crc kubenswrapper[4685]: E0128 12:54:20.885136 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f76048d5-14a9-4f65-92be-5fd02e186453" containerName="glance-log" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.885153 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f76048d5-14a9-4f65-92be-5fd02e186453" containerName="glance-log" Jan 28 12:54:20 crc kubenswrapper[4685]: E0128 12:54:20.885190 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0f5e384-94e8-4a68-ae6b-8cd5e05d85df" containerName="mariadb-account-delete" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.885199 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0f5e384-94e8-4a68-ae6b-8cd5e05d85df" containerName="mariadb-account-delete" Jan 28 12:54:20 crc kubenswrapper[4685]: E0128 12:54:20.885211 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a15e6ca-b616-402c-b742-2ba7acbf5763" containerName="glance-httpd" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.885219 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a15e6ca-b616-402c-b742-2ba7acbf5763" containerName="glance-httpd" Jan 28 12:54:20 crc kubenswrapper[4685]: E0128 12:54:20.885236 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a15e6ca-b616-402c-b742-2ba7acbf5763" containerName="glance-log" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.885243 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a15e6ca-b616-402c-b742-2ba7acbf5763" containerName="glance-log" Jan 28 12:54:20 crc kubenswrapper[4685]: E0128 12:54:20.885250 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f5460a2-cb8a-44aa-a4e5-44109e751133" containerName="glance-httpd" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.885256 4685 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="3f5460a2-cb8a-44aa-a4e5-44109e751133" containerName="glance-httpd" Jan 28 12:54:20 crc kubenswrapper[4685]: E0128 12:54:20.885271 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="239a471f-04bf-4e9a-b3d2-692026e9ca8d" containerName="glance-log" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.885280 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="239a471f-04bf-4e9a-b3d2-692026e9ca8d" containerName="glance-log" Jan 28 12:54:20 crc kubenswrapper[4685]: E0128 12:54:20.885294 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="239a471f-04bf-4e9a-b3d2-692026e9ca8d" containerName="glance-httpd" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.885300 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="239a471f-04bf-4e9a-b3d2-692026e9ca8d" containerName="glance-httpd" Jan 28 12:54:20 crc kubenswrapper[4685]: E0128 12:54:20.885314 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f5460a2-cb8a-44aa-a4e5-44109e751133" containerName="glance-log" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.885321 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f5460a2-cb8a-44aa-a4e5-44109e751133" containerName="glance-log" Jan 28 12:54:20 crc kubenswrapper[4685]: E0128 12:54:20.885336 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f76048d5-14a9-4f65-92be-5fd02e186453" containerName="glance-httpd" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.885342 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="f76048d5-14a9-4f65-92be-5fd02e186453" containerName="glance-httpd" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.885474 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0f5e384-94e8-4a68-ae6b-8cd5e05d85df" containerName="mariadb-account-delete" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.885491 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="239a471f-04bf-4e9a-b3d2-692026e9ca8d" containerName="glance-log" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.885500 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f5460a2-cb8a-44aa-a4e5-44109e751133" containerName="glance-log" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.885506 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="f76048d5-14a9-4f65-92be-5fd02e186453" containerName="glance-httpd" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.885517 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f5460a2-cb8a-44aa-a4e5-44109e751133" containerName="glance-httpd" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.885524 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="239a471f-04bf-4e9a-b3d2-692026e9ca8d" containerName="glance-httpd" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.885538 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a15e6ca-b616-402c-b742-2ba7acbf5763" containerName="glance-log" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.885550 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a15e6ca-b616-402c-b742-2ba7acbf5763" containerName="glance-httpd" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.885560 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="f76048d5-14a9-4f65-92be-5fd02e186453" containerName="glance-log" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.886456 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-wqzs5/must-gather-cpdsl" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.887548 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/5d3926de-6c03-4c49-a020-28831263dbe2-must-gather-output\") pod \"must-gather-cpdsl\" (UID: \"5d3926de-6c03-4c49-a020-28831263dbe2\") " pod="openshift-must-gather-wqzs5/must-gather-cpdsl" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.887777 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4dsd\" (UniqueName: \"kubernetes.io/projected/5d3926de-6c03-4c49-a020-28831263dbe2-kube-api-access-z4dsd\") pod \"must-gather-cpdsl\" (UID: \"5d3926de-6c03-4c49-a020-28831263dbe2\") " pod="openshift-must-gather-wqzs5/must-gather-cpdsl" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.889467 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-wqzs5"/"kube-root-ca.crt" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.889486 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-wqzs5"/"openshift-service-ca.crt" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.897956 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-wqzs5/must-gather-cpdsl"] Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.988426 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/5d3926de-6c03-4c49-a020-28831263dbe2-must-gather-output\") pod \"must-gather-cpdsl\" (UID: \"5d3926de-6c03-4c49-a020-28831263dbe2\") " pod="openshift-must-gather-wqzs5/must-gather-cpdsl" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.988505 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4dsd\" (UniqueName: \"kubernetes.io/projected/5d3926de-6c03-4c49-a020-28831263dbe2-kube-api-access-z4dsd\") pod \"must-gather-cpdsl\" (UID: \"5d3926de-6c03-4c49-a020-28831263dbe2\") " pod="openshift-must-gather-wqzs5/must-gather-cpdsl" Jan 28 12:54:20 crc kubenswrapper[4685]: I0128 12:54:20.988964 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/5d3926de-6c03-4c49-a020-28831263dbe2-must-gather-output\") pod \"must-gather-cpdsl\" (UID: \"5d3926de-6c03-4c49-a020-28831263dbe2\") " pod="openshift-must-gather-wqzs5/must-gather-cpdsl" Jan 28 12:54:21 crc kubenswrapper[4685]: I0128 12:54:21.006852 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4dsd\" (UniqueName: \"kubernetes.io/projected/5d3926de-6c03-4c49-a020-28831263dbe2-kube-api-access-z4dsd\") pod \"must-gather-cpdsl\" (UID: \"5d3926de-6c03-4c49-a020-28831263dbe2\") " pod="openshift-must-gather-wqzs5/must-gather-cpdsl" Jan 28 12:54:21 crc kubenswrapper[4685]: I0128 12:54:21.217079 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-wqzs5/must-gather-cpdsl" Jan 28 12:54:21 crc kubenswrapper[4685]: I0128 12:54:21.624224 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-wqzs5/must-gather-cpdsl"] Jan 28 12:54:21 crc kubenswrapper[4685]: I0128 12:54:21.637544 4685 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 12:54:22 crc kubenswrapper[4685]: I0128 12:54:22.492204 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-wqzs5/must-gather-cpdsl" event={"ID":"5d3926de-6c03-4c49-a020-28831263dbe2","Type":"ContainerStarted","Data":"ebca676f6a5c8f744ef9b6c5bde509d158fe7f27777962c634415b3c1c295f5b"} Jan 28 12:54:29 crc kubenswrapper[4685]: I0128 12:54:29.550635 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-wqzs5/must-gather-cpdsl" event={"ID":"5d3926de-6c03-4c49-a020-28831263dbe2","Type":"ContainerStarted","Data":"37927dfa4952b22ccdded8b02066406a63f004fafd5c8344727d21f6d3063e88"} Jan 28 12:54:29 crc kubenswrapper[4685]: I0128 12:54:29.551226 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-wqzs5/must-gather-cpdsl" event={"ID":"5d3926de-6c03-4c49-a020-28831263dbe2","Type":"ContainerStarted","Data":"02e3ff53ab139c6b21b0c55d499c6aa07cf33763f28f1d8be3a54bd84fed80f3"} Jan 28 12:54:29 crc kubenswrapper[4685]: I0128 12:54:29.570709 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-wqzs5/must-gather-cpdsl" podStartSLOduration=2.524338204 podStartE2EDuration="9.570691439s" podCreationTimestamp="2026-01-28 12:54:20 +0000 UTC" firstStartedPulling="2026-01-28 12:54:21.637474657 +0000 UTC m=+2012.724888492" lastFinishedPulling="2026-01-28 12:54:28.683827892 +0000 UTC m=+2019.771241727" observedRunningTime="2026-01-28 12:54:29.563969918 +0000 UTC m=+2020.651383753" watchObservedRunningTime="2026-01-28 12:54:29.570691439 +0000 UTC m=+2020.658105274" Jan 28 12:54:52 crc kubenswrapper[4685]: I0128 12:54:52.396535 4685 scope.go:117] "RemoveContainer" containerID="598cc903becf3fee5c74081d1912af8c207d621c2d547575eb7dc889b5ccb598" Jan 28 12:54:52 crc kubenswrapper[4685]: I0128 12:54:52.423843 4685 scope.go:117] "RemoveContainer" containerID="bbea7998d370cf9b7d1f9257e48712f0a2c7f7b8d3d2d4156b93ea3a4f6d3e1b" Jan 28 12:54:52 crc kubenswrapper[4685]: I0128 12:54:52.456700 4685 scope.go:117] "RemoveContainer" containerID="367e689a8258a1fb3096eeaefbc08c5ab298b0bab15a7d608dcf303260e6e3d0" Jan 28 12:54:57 crc kubenswrapper[4685]: I0128 12:54:57.069241 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:54:57 crc kubenswrapper[4685]: I0128 12:54:57.069841 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:55:06 crc kubenswrapper[4685]: I0128 12:55:06.417110 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j_a6f26466-01ba-4d9b-9e8e-80baf6e27ea7/util/0.log" 
Jan 28 12:55:06 crc kubenswrapper[4685]: I0128 12:55:06.536773 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j_a6f26466-01ba-4d9b-9e8e-80baf6e27ea7/util/0.log" Jan 28 12:55:06 crc kubenswrapper[4685]: I0128 12:55:06.562960 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j_a6f26466-01ba-4d9b-9e8e-80baf6e27ea7/pull/0.log" Jan 28 12:55:06 crc kubenswrapper[4685]: I0128 12:55:06.598501 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j_a6f26466-01ba-4d9b-9e8e-80baf6e27ea7/pull/0.log" Jan 28 12:55:06 crc kubenswrapper[4685]: I0128 12:55:06.780325 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j_a6f26466-01ba-4d9b-9e8e-80baf6e27ea7/util/0.log" Jan 28 12:55:06 crc kubenswrapper[4685]: I0128 12:55:06.784723 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j_a6f26466-01ba-4d9b-9e8e-80baf6e27ea7/extract/0.log" Jan 28 12:55:06 crc kubenswrapper[4685]: I0128 12:55:06.838396 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_56f73eb2e138ef03e57c707ba43538804f9780803eb22582e70dff8d859j88j_a6f26466-01ba-4d9b-9e8e-80baf6e27ea7/pull/0.log" Jan 28 12:55:06 crc kubenswrapper[4685]: I0128 12:55:06.945144 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm_31079862-762b-46dd-93f6-c627cca53447/util/0.log" Jan 28 12:55:07 crc kubenswrapper[4685]: I0128 12:55:07.107990 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm_31079862-762b-46dd-93f6-c627cca53447/pull/0.log" Jan 28 12:55:07 crc kubenswrapper[4685]: I0128 12:55:07.111390 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm_31079862-762b-46dd-93f6-c627cca53447/pull/0.log" Jan 28 12:55:07 crc kubenswrapper[4685]: I0128 12:55:07.162423 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm_31079862-762b-46dd-93f6-c627cca53447/util/0.log" Jan 28 12:55:07 crc kubenswrapper[4685]: I0128 12:55:07.284399 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm_31079862-762b-46dd-93f6-c627cca53447/extract/0.log" Jan 28 12:55:07 crc kubenswrapper[4685]: I0128 12:55:07.285433 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm_31079862-762b-46dd-93f6-c627cca53447/pull/0.log" Jan 28 12:55:07 crc kubenswrapper[4685]: I0128 12:55:07.297187 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_737e8a68050c0284ad40465e1910950d0d8768e294aeeb495ca6340202x74rm_31079862-762b-46dd-93f6-c627cca53447/util/0.log" Jan 28 12:55:07 crc kubenswrapper[4685]: I0128 12:55:07.436298 4685 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2_79936f73-f97a-4c8d-ba46-ad531e8ed560/util/0.log" Jan 28 12:55:07 crc kubenswrapper[4685]: I0128 12:55:07.603423 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2_79936f73-f97a-4c8d-ba46-ad531e8ed560/pull/0.log" Jan 28 12:55:07 crc kubenswrapper[4685]: I0128 12:55:07.614264 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2_79936f73-f97a-4c8d-ba46-ad531e8ed560/util/0.log" Jan 28 12:55:07 crc kubenswrapper[4685]: I0128 12:55:07.642623 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2_79936f73-f97a-4c8d-ba46-ad531e8ed560/pull/0.log" Jan 28 12:55:07 crc kubenswrapper[4685]: I0128 12:55:07.763079 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2_79936f73-f97a-4c8d-ba46-ad531e8ed560/pull/0.log" Jan 28 12:55:07 crc kubenswrapper[4685]: I0128 12:55:07.786704 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2_79936f73-f97a-4c8d-ba46-ad531e8ed560/extract/0.log" Jan 28 12:55:07 crc kubenswrapper[4685]: I0128 12:55:07.793768 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_920b3933541dd54eb27cdc8c5dcad58318a776ec0e7a3ec14a5289a926mvpn2_79936f73-f97a-4c8d-ba46-ad531e8ed560/util/0.log" Jan 28 12:55:07 crc kubenswrapper[4685]: I0128 12:55:07.929241 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz_d4fd8a66-4af7-42c1-98e0-ed2f03d735fc/util/0.log" Jan 28 12:55:08 crc kubenswrapper[4685]: I0128 12:55:08.065081 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz_d4fd8a66-4af7-42c1-98e0-ed2f03d735fc/util/0.log" Jan 28 12:55:08 crc kubenswrapper[4685]: I0128 12:55:08.082513 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz_d4fd8a66-4af7-42c1-98e0-ed2f03d735fc/pull/0.log" Jan 28 12:55:08 crc kubenswrapper[4685]: I0128 12:55:08.110363 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz_d4fd8a66-4af7-42c1-98e0-ed2f03d735fc/pull/0.log" Jan 28 12:55:08 crc kubenswrapper[4685]: I0128 12:55:08.226366 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz_d4fd8a66-4af7-42c1-98e0-ed2f03d735fc/util/0.log" Jan 28 12:55:08 crc kubenswrapper[4685]: I0128 12:55:08.245879 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz_d4fd8a66-4af7-42c1-98e0-ed2f03d735fc/extract/0.log" Jan 28 12:55:08 crc kubenswrapper[4685]: I0128 12:55:08.266643 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590lkrcz_d4fd8a66-4af7-42c1-98e0-ed2f03d735fc/pull/0.log" Jan 28 12:55:08 
crc kubenswrapper[4685]: I0128 12:55:08.426816 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd_28604166-5316-49b0-acf2-c59881d7d29c/util/0.log" Jan 28 12:55:08 crc kubenswrapper[4685]: I0128 12:55:08.541366 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd_28604166-5316-49b0-acf2-c59881d7d29c/util/0.log" Jan 28 12:55:08 crc kubenswrapper[4685]: I0128 12:55:08.576239 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd_28604166-5316-49b0-acf2-c59881d7d29c/pull/0.log" Jan 28 12:55:08 crc kubenswrapper[4685]: I0128 12:55:08.598980 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd_28604166-5316-49b0-acf2-c59881d7d29c/pull/0.log" Jan 28 12:55:08 crc kubenswrapper[4685]: I0128 12:55:08.735778 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd_28604166-5316-49b0-acf2-c59881d7d29c/util/0.log" Jan 28 12:55:08 crc kubenswrapper[4685]: I0128 12:55:08.780788 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd_28604166-5316-49b0-acf2-c59881d7d29c/pull/0.log" Jan 28 12:55:08 crc kubenswrapper[4685]: I0128 12:55:08.786475 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4efpl4vd_28604166-5316-49b0-acf2-c59881d7d29c/extract/0.log" Jan 28 12:55:08 crc kubenswrapper[4685]: I0128 12:55:08.916419 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd_539a4ff7-66af-412d-a54a-bfe010e856b4/util/0.log" Jan 28 12:55:09 crc kubenswrapper[4685]: I0128 12:55:09.054350 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd_539a4ff7-66af-412d-a54a-bfe010e856b4/pull/0.log" Jan 28 12:55:09 crc kubenswrapper[4685]: I0128 12:55:09.066282 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd_539a4ff7-66af-412d-a54a-bfe010e856b4/pull/0.log" Jan 28 12:55:09 crc kubenswrapper[4685]: I0128 12:55:09.084988 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd_539a4ff7-66af-412d-a54a-bfe010e856b4/util/0.log" Jan 28 12:55:09 crc kubenswrapper[4685]: I0128 12:55:09.256104 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd_539a4ff7-66af-412d-a54a-bfe010e856b4/pull/0.log" Jan 28 12:55:09 crc kubenswrapper[4685]: I0128 12:55:09.258040 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd_539a4ff7-66af-412d-a54a-bfe010e856b4/util/0.log" Jan 28 12:55:09 crc kubenswrapper[4685]: I0128 12:55:09.265352 4685 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f75768hgdd_539a4ff7-66af-412d-a54a-bfe010e856b4/extract/0.log" Jan 28 12:55:09 crc kubenswrapper[4685]: I0128 12:55:09.433273 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m_c588671a-6c05-4991-9345-f9bc8724b0c7/util/0.log" Jan 28 12:55:09 crc kubenswrapper[4685]: I0128 12:55:09.560612 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m_c588671a-6c05-4991-9345-f9bc8724b0c7/util/0.log" Jan 28 12:55:09 crc kubenswrapper[4685]: I0128 12:55:09.561563 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m_c588671a-6c05-4991-9345-f9bc8724b0c7/pull/0.log" Jan 28 12:55:09 crc kubenswrapper[4685]: I0128 12:55:09.571596 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m_c588671a-6c05-4991-9345-f9bc8724b0c7/pull/0.log" Jan 28 12:55:09 crc kubenswrapper[4685]: I0128 12:55:09.736820 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m_c588671a-6c05-4991-9345-f9bc8724b0c7/pull/0.log" Jan 28 12:55:09 crc kubenswrapper[4685]: I0128 12:55:09.780012 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m_c588671a-6c05-4991-9345-f9bc8724b0c7/util/0.log" Jan 28 12:55:09 crc kubenswrapper[4685]: I0128 12:55:09.786976 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40cdh2m_c588671a-6c05-4991-9345-f9bc8724b0c7/extract/0.log" Jan 28 12:55:09 crc kubenswrapper[4685]: I0128 12:55:09.942021 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-index-kv289_421794c1-13e8-4c5b-8515-2eca0ab070b5/registry-server/0.log" Jan 28 12:55:09 crc kubenswrapper[4685]: I0128 12:55:09.998579 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-d97d5c6-w7s6g_085fc160-da8c-4422-962f-e14e737e5a42/manager/0.log" Jan 28 12:55:10 crc kubenswrapper[4685]: I0128 12:55:10.008925 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68f7444d68-zkgvn_c1ad1a61-ac72-41ad-8bce-8ac55acf382e/manager/0.log" Jan 28 12:55:10 crc kubenswrapper[4685]: I0128 12:55:10.175004 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-index-qjv59_46e31e61-86e7-4c28-a833-e524a0220612/registry-server/0.log" Jan 28 12:55:10 crc kubenswrapper[4685]: I0128 12:55:10.216873 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-645c8ff456-fl4nf_d9afeea9-b228-4b11-8068-079d2093771c/manager/0.log" Jan 28 12:55:10 crc kubenswrapper[4685]: I0128 12:55:10.451338 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-index-jbffg_95a2d809-2dbf-456c-accd-390fff7ad267/registry-server/0.log" Jan 28 12:55:10 crc kubenswrapper[4685]: I0128 12:55:10.539555 4685 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7bbbd9668d-jpf96_7c286e16-1bf9-411c-9787-5a51a2ca1adc/manager/0.log" Jan 28 12:55:10 crc kubenswrapper[4685]: I0128 12:55:10.567967 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-index-5m4xf_cd4ba4c3-6264-4365-a295-68b73a5efa21/registry-server/0.log" Jan 28 12:55:10 crc kubenswrapper[4685]: I0128 12:55:10.701113 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-5c6689bf87-nwxg8_ea53fa95-6178-4658-a817-773696aac856/manager/0.log" Jan 28 12:55:10 crc kubenswrapper[4685]: I0128 12:55:10.807762 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-index-r6b6g_5c808be2-bfa1-446b-adc0-1066d556746e/registry-server/0.log" Jan 28 12:55:10 crc kubenswrapper[4685]: I0128 12:55:10.837401 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-779fc9694b-xtpr9_dd150285-6b2b-446f-9983-b207a55d160d/operator/0.log" Jan 28 12:55:10 crc kubenswrapper[4685]: I0128 12:55:10.951413 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-index-5lgtn_e7b04b51-2455-470a-b67a-7bffd1f97b7a/registry-server/0.log" Jan 28 12:55:11 crc kubenswrapper[4685]: I0128 12:55:11.051047 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-c8fb945fd-4b7jn_98ccfb23-1658-40c4-bb6a-30e64771d98a/manager/0.log" Jan 28 12:55:11 crc kubenswrapper[4685]: I0128 12:55:11.117152 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-index-97nxq_fbf9a561-e551-4c1f-9ac1-ac930e8fc5ec/registry-server/0.log" Jan 28 12:55:21 crc kubenswrapper[4685]: I0128 12:55:21.033288 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/keystone-db-create-6nbgl"] Jan 28 12:55:21 crc kubenswrapper[4685]: I0128 12:55:21.040028 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/keystone-db-create-6nbgl"] Jan 28 12:55:22 crc kubenswrapper[4685]: I0128 12:55:22.553556 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9126ced2-36b8-48c1-97e2-22a4d5b69ec9" path="/var/lib/kubelet/pods/9126ced2-36b8-48c1-97e2-22a4d5b69ec9/volumes" Jan 28 12:55:25 crc kubenswrapper[4685]: I0128 12:55:25.352429 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-jbtd9_0aa6b49a-8078-44f4-b1a9-2542d5bad461/control-plane-machine-set-operator/2.log" Jan 28 12:55:25 crc kubenswrapper[4685]: I0128 12:55:25.352542 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-jbtd9_0aa6b49a-8078-44f4-b1a9-2542d5bad461/control-plane-machine-set-operator/1.log" Jan 28 12:55:25 crc kubenswrapper[4685]: I0128 12:55:25.531547 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-xcd5g_a7ad2819-235d-4f6d-b731-6f8e31db0b13/machine-api-operator/0.log" Jan 28 12:55:25 crc kubenswrapper[4685]: I0128 12:55:25.549104 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-xcd5g_a7ad2819-235d-4f6d-b731-6f8e31db0b13/kube-rbac-proxy/0.log" Jan 28 12:55:27 crc kubenswrapper[4685]: I0128 12:55:27.023526 
4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/keystone-9121-account-create-update-xl2qc"] Jan 28 12:55:27 crc kubenswrapper[4685]: I0128 12:55:27.028644 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/keystone-9121-account-create-update-xl2qc"] Jan 28 12:55:27 crc kubenswrapper[4685]: I0128 12:55:27.069139 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 12:55:27 crc kubenswrapper[4685]: I0128 12:55:27.069226 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 12:55:28 crc kubenswrapper[4685]: I0128 12:55:28.553550 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5aa289a8-1ea3-4559-8d70-cab87d8fd9d9" path="/var/lib/kubelet/pods/5aa289a8-1ea3-4559-8d70-cab87d8fd9d9/volumes" Jan 28 12:55:42 crc kubenswrapper[4685]: I0128 12:55:42.030394 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/keystone-db-sync-tpztp"] Jan 28 12:55:42 crc kubenswrapper[4685]: I0128 12:55:42.037499 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/keystone-db-sync-tpztp"] Jan 28 12:55:42 crc kubenswrapper[4685]: I0128 12:55:42.555284 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f458232-c94a-45fa-a093-f9d9d1d09541" path="/var/lib/kubelet/pods/4f458232-c94a-45fa-a093-f9d9d1d09541/volumes" Jan 28 12:55:49 crc kubenswrapper[4685]: I0128 12:55:49.033728 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["glance-kuttl-tests/keystone-bootstrap-sbvqt"] Jan 28 12:55:49 crc kubenswrapper[4685]: I0128 12:55:49.052143 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["glance-kuttl-tests/keystone-bootstrap-sbvqt"] Jan 28 12:55:50 crc kubenswrapper[4685]: I0128 12:55:50.554477 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a" path="/var/lib/kubelet/pods/a2fc2e07-b32b-4b9c-b0ea-211e82f3d87a/volumes" Jan 28 12:55:52 crc kubenswrapper[4685]: I0128 12:55:52.584455 4685 scope.go:117] "RemoveContainer" containerID="b7bec0b6ca776dc6d4f57e6880bfa3cd301c90bb12b1346a3e404634644bb00f" Jan 28 12:55:52 crc kubenswrapper[4685]: I0128 12:55:52.637420 4685 scope.go:117] "RemoveContainer" containerID="63e3b78192d6728588ca054b56757e722ca0e7419547cd935613cad3ff248eaf" Jan 28 12:55:52 crc kubenswrapper[4685]: I0128 12:55:52.675953 4685 scope.go:117] "RemoveContainer" containerID="edd16fad9dc515cf2967026d77e8e3e6a14761b4d5266c917c303dc2c1cc7874" Jan 28 12:55:52 crc kubenswrapper[4685]: I0128 12:55:52.700045 4685 scope.go:117] "RemoveContainer" containerID="1da72d9f4c747d5cce89036a384f1636bf58722ca89158cb889e49ad148d5e39" Jan 28 12:55:52 crc kubenswrapper[4685]: I0128 12:55:52.731138 4685 scope.go:117] "RemoveContainer" containerID="ab0b30dc02396b83bf1f61c9d41a3aff0baf1a6e769900e5f4c55da7e2bf2004" Jan 28 12:55:52 crc kubenswrapper[4685]: I0128 12:55:52.764002 4685 scope.go:117] "RemoveContainer" containerID="1c403e054423ab204342fc1da39746eda0684a5e64958a3f8faf6a628c02add4" 
Jan 28 12:55:52 crc kubenswrapper[4685]: I0128 12:55:52.822441 4685 scope.go:117] "RemoveContainer" containerID="fcac3c3a3e0d9f13e189e87b4daf192db1405d55bdcf80c26e3b263aac8b8503"
Jan 28 12:55:52 crc kubenswrapper[4685]: I0128 12:55:52.857374 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-qpvfn_e03e4d40-b0e2-41c8-aecc-2386d96fff4c/kube-rbac-proxy/0.log"
Jan 28 12:55:52 crc kubenswrapper[4685]: I0128 12:55:52.883359 4685 scope.go:117] "RemoveContainer" containerID="1c90c4c0180e4e7a42fe9c87dfe1f824b46c3fdee916c92ed93d10d3225a5d44"
Jan 28 12:55:52 crc kubenswrapper[4685]: I0128 12:55:52.905219 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-qpvfn_e03e4d40-b0e2-41c8-aecc-2386d96fff4c/controller/0.log"
Jan 28 12:55:53 crc kubenswrapper[4685]: I0128 12:55:53.061843 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lfz8d_42db1da2-22b1-4020-92ee-e29273e09efa/cp-frr-files/0.log"
Jan 28 12:55:53 crc kubenswrapper[4685]: I0128 12:55:53.238556 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lfz8d_42db1da2-22b1-4020-92ee-e29273e09efa/cp-reloader/0.log"
Jan 28 12:55:53 crc kubenswrapper[4685]: I0128 12:55:53.241059 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lfz8d_42db1da2-22b1-4020-92ee-e29273e09efa/cp-frr-files/0.log"
Jan 28 12:55:53 crc kubenswrapper[4685]: I0128 12:55:53.264153 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lfz8d_42db1da2-22b1-4020-92ee-e29273e09efa/cp-reloader/0.log"
Jan 28 12:55:53 crc kubenswrapper[4685]: I0128 12:55:53.272041 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lfz8d_42db1da2-22b1-4020-92ee-e29273e09efa/cp-metrics/0.log"
Jan 28 12:55:53 crc kubenswrapper[4685]: I0128 12:55:53.436492 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lfz8d_42db1da2-22b1-4020-92ee-e29273e09efa/cp-frr-files/0.log"
Jan 28 12:55:53 crc kubenswrapper[4685]: I0128 12:55:53.438596 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lfz8d_42db1da2-22b1-4020-92ee-e29273e09efa/cp-reloader/0.log"
Jan 28 12:55:53 crc kubenswrapper[4685]: I0128 12:55:53.453661 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lfz8d_42db1da2-22b1-4020-92ee-e29273e09efa/cp-metrics/0.log"
Jan 28 12:55:53 crc kubenswrapper[4685]: I0128 12:55:53.489934 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lfz8d_42db1da2-22b1-4020-92ee-e29273e09efa/cp-metrics/0.log"
Jan 28 12:55:53 crc kubenswrapper[4685]: I0128 12:55:53.594124 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lfz8d_42db1da2-22b1-4020-92ee-e29273e09efa/cp-frr-files/0.log"
Jan 28 12:55:53 crc kubenswrapper[4685]: I0128 12:55:53.652025 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lfz8d_42db1da2-22b1-4020-92ee-e29273e09efa/cp-reloader/0.log"
Jan 28 12:55:53 crc kubenswrapper[4685]: I0128 12:55:53.660480 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lfz8d_42db1da2-22b1-4020-92ee-e29273e09efa/cp-metrics/0.log"
Jan 28 12:55:53 crc kubenswrapper[4685]: I0128 12:55:53.730942 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lfz8d_42db1da2-22b1-4020-92ee-e29273e09efa/controller/0.log"
Jan 28 12:55:53 crc kubenswrapper[4685]: I0128 12:55:53.863257 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lfz8d_42db1da2-22b1-4020-92ee-e29273e09efa/kube-rbac-proxy/0.log"
Jan 28 12:55:53 crc kubenswrapper[4685]: I0128 12:55:53.876576 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lfz8d_42db1da2-22b1-4020-92ee-e29273e09efa/frr-metrics/0.log"
Jan 28 12:55:53 crc kubenswrapper[4685]: I0128 12:55:53.923856 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lfz8d_42db1da2-22b1-4020-92ee-e29273e09efa/kube-rbac-proxy-frr/0.log"
Jan 28 12:55:54 crc kubenswrapper[4685]: I0128 12:55:54.051267 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lfz8d_42db1da2-22b1-4020-92ee-e29273e09efa/reloader/0.log"
Jan 28 12:55:54 crc kubenswrapper[4685]: I0128 12:55:54.118542 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-tr9p9_5f6bbfce-281d-41ee-8911-98fd13d2cdf7/frr-k8s-webhook-server/0.log"
Jan 28 12:55:54 crc kubenswrapper[4685]: I0128 12:55:54.393992 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lfz8d_42db1da2-22b1-4020-92ee-e29273e09efa/frr/0.log"
Jan 28 12:55:54 crc kubenswrapper[4685]: I0128 12:55:54.484686 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-686c4fd867-8j42k_180820f2-a4dc-4daf-9e46-f065f20cb559/manager/0.log"
Jan 28 12:55:54 crc kubenswrapper[4685]: I0128 12:55:54.617575 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-d6dcdcfd6-bjcnh_1323c35f-78a9-41e6-971c-0556d1bbdade/webhook-server/0.log"
Jan 28 12:55:54 crc kubenswrapper[4685]: I0128 12:55:54.730596 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-l6dv2_0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55/kube-rbac-proxy/0.log"
Jan 28 12:55:54 crc kubenswrapper[4685]: I0128 12:55:54.791006 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-l6dv2_0c9dfbc9-b8f0-4ba7-8ffa-72f9349c5d55/speaker/0.log"
Jan 28 12:55:57 crc kubenswrapper[4685]: I0128 12:55:57.069946 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 12:55:57 crc kubenswrapper[4685]: I0128 12:55:57.070219 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 12:55:57 crc kubenswrapper[4685]: I0128 12:55:57.070263 4685 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv"
Jan 28 12:55:57 crc kubenswrapper[4685]: I0128 12:55:57.070860 4685 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"be50d96e347898fc35ed6dffd9d30ab7cf139d963b1551cf0b7bbde10cd2350c"} pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 28 12:55:57 crc kubenswrapper[4685]: I0128 12:55:57.070921 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" containerID="cri-o://be50d96e347898fc35ed6dffd9d30ab7cf139d963b1551cf0b7bbde10cd2350c" gracePeriod=600
Jan 28 12:55:57 crc kubenswrapper[4685]: I0128 12:55:57.207386 4685 generic.go:334] "Generic (PLEG): container finished" podID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerID="be50d96e347898fc35ed6dffd9d30ab7cf139d963b1551cf0b7bbde10cd2350c" exitCode=0
Jan 28 12:55:57 crc kubenswrapper[4685]: I0128 12:55:57.207439 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerDied","Data":"be50d96e347898fc35ed6dffd9d30ab7cf139d963b1551cf0b7bbde10cd2350c"}
Jan 28 12:55:57 crc kubenswrapper[4685]: I0128 12:55:57.207474 4685 scope.go:117] "RemoveContainer" containerID="8e7355562f93f61f7bf2eff420b7a9cb23a2848e86ba404e6d1a5e6d17381a65"
Jan 28 12:55:58 crc kubenswrapper[4685]: I0128 12:55:58.216652 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerStarted","Data":"2641a0dab6f217bc7e61951ba9c7e3b3dd19fe7452d4002ac6c0710cf0137283"}
Jan 28 12:56:08 crc kubenswrapper[4685]: I0128 12:56:08.292846 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_keystone-784968dcbc-drlnm_782c1b4b-61bc-4cdb-8dc6-421d29d6b874/keystone-api/0.log"
Jan 28 12:56:08 crc kubenswrapper[4685]: I0128 12:56:08.314029 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_openstack-galera-0_ae11d7ca-ae3f-41f9-b510-18fde2492aa2/mysql-bootstrap/0.log"
Jan 28 12:56:08 crc kubenswrapper[4685]: I0128 12:56:08.454561 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_openstack-galera-0_ae11d7ca-ae3f-41f9-b510-18fde2492aa2/mysql-bootstrap/0.log"
Jan 28 12:56:08 crc kubenswrapper[4685]: I0128 12:56:08.531247 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_openstack-galera-0_ae11d7ca-ae3f-41f9-b510-18fde2492aa2/galera/0.log"
Jan 28 12:56:08 crc kubenswrapper[4685]: I0128 12:56:08.700321 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_openstack-galera-1_a53ef3ad-6fae-4b22-a498-cb541237093d/mysql-bootstrap/0.log"
Jan 28 12:56:08 crc kubenswrapper[4685]: I0128 12:56:08.832887 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_openstack-galera-1_a53ef3ad-6fae-4b22-a498-cb541237093d/mysql-bootstrap/0.log"
Jan 28 12:56:08 crc kubenswrapper[4685]: I0128 12:56:08.945472 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_openstack-galera-1_a53ef3ad-6fae-4b22-a498-cb541237093d/galera/0.log"
Jan 28 12:56:09 crc kubenswrapper[4685]: I0128 12:56:09.089436 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_openstack-galera-2_4f3b56e1-537e-46b6-b9fa-78b735d4721c/mysql-bootstrap/0.log"
Jan 28 12:56:09 crc kubenswrapper[4685]: I0128 12:56:09.233827 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_openstack-galera-2_4f3b56e1-537e-46b6-b9fa-78b735d4721c/mysql-bootstrap/0.log"
Jan 28 12:56:09 crc kubenswrapper[4685]: I0128 12:56:09.275070 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_openstack-galera-2_4f3b56e1-537e-46b6-b9fa-78b735d4721c/galera/0.log"
Jan 28 12:56:09 crc kubenswrapper[4685]: I0128 12:56:09.414918 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_openstackclient_2a47bf9a-a943-4bcc-8a19-be4f8db1cfa7/openstackclient/0.log"
Jan 28 12:56:09 crc kubenswrapper[4685]: I0128 12:56:09.668476 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_rabbitmq-server-0_b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb/setup-container/0.log"
Jan 28 12:56:09 crc kubenswrapper[4685]: I0128 12:56:09.814023 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_rabbitmq-server-0_b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb/setup-container/0.log"
Jan 28 12:56:09 crc kubenswrapper[4685]: I0128 12:56:09.879674 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_rabbitmq-server-0_b5afbbf4-34f6-46ad-9ca3-55d275f6b8cb/rabbitmq/0.log"
Jan 28 12:56:10 crc kubenswrapper[4685]: I0128 12:56:10.035005 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-proxy-cb6f749b7-85dsn_4a145e58-4189-4128-981c-48608a766854/proxy-httpd/0.log"
Jan 28 12:56:10 crc kubenswrapper[4685]: I0128 12:56:10.086894 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-proxy-cb6f749b7-85dsn_4a145e58-4189-4128-981c-48608a766854/proxy-server/0.log"
Jan 28 12:56:10 crc kubenswrapper[4685]: I0128 12:56:10.283462 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-ring-rebalance-q4x4r_1d674f3f-5708-4b77-85fa-7fcfea43d98d/swift-ring-rebalance/0.log"
Jan 28 12:56:10 crc kubenswrapper[4685]: I0128 12:56:10.385236 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_031a6182-897b-45d0-a48f-a9473aebe554/account-auditor/0.log"
Jan 28 12:56:10 crc kubenswrapper[4685]: I0128 12:56:10.433895 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_memcached-0_298c85ee-7f53-4f03-908b-42d6773623a6/memcached/0.log"
Jan 28 12:56:10 crc kubenswrapper[4685]: I0128 12:56:10.498757 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_031a6182-897b-45d0-a48f-a9473aebe554/account-reaper/0.log"
Jan 28 12:56:10 crc kubenswrapper[4685]: I0128 12:56:10.535712 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_031a6182-897b-45d0-a48f-a9473aebe554/account-server/0.log"
Jan 28 12:56:10 crc kubenswrapper[4685]: I0128 12:56:10.564564 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_031a6182-897b-45d0-a48f-a9473aebe554/account-replicator/0.log"
Jan 28 12:56:10 crc kubenswrapper[4685]: I0128 12:56:10.619805 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_031a6182-897b-45d0-a48f-a9473aebe554/container-auditor/0.log"
Jan 28 12:56:10 crc kubenswrapper[4685]: I0128 12:56:10.740767 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_031a6182-897b-45d0-a48f-a9473aebe554/container-replicator/0.log"
Jan 28 12:56:10 crc kubenswrapper[4685]: I0128 12:56:10.740904 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_031a6182-897b-45d0-a48f-a9473aebe554/container-updater/0.log"
Jan 28 12:56:10 crc kubenswrapper[4685]: I0128 12:56:10.748881 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_031a6182-897b-45d0-a48f-a9473aebe554/container-server/0.log"
Jan 28 12:56:10 crc kubenswrapper[4685]: I0128 12:56:10.839181 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_031a6182-897b-45d0-a48f-a9473aebe554/object-auditor/0.log"
Jan 28 12:56:10 crc kubenswrapper[4685]: I0128 12:56:10.934636 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_031a6182-897b-45d0-a48f-a9473aebe554/object-replicator/0.log"
Jan 28 12:56:10 crc kubenswrapper[4685]: I0128 12:56:10.942898 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_031a6182-897b-45d0-a48f-a9473aebe554/object-expirer/0.log"
Jan 28 12:56:10 crc kubenswrapper[4685]: I0128 12:56:10.943432 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_031a6182-897b-45d0-a48f-a9473aebe554/object-server/0.log"
Jan 28 12:56:11 crc kubenswrapper[4685]: I0128 12:56:11.040043 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_031a6182-897b-45d0-a48f-a9473aebe554/object-updater/0.log"
Jan 28 12:56:11 crc kubenswrapper[4685]: I0128 12:56:11.109923 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_031a6182-897b-45d0-a48f-a9473aebe554/rsync/0.log"
Jan 28 12:56:11 crc kubenswrapper[4685]: I0128 12:56:11.130528 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/glance-kuttl-tests_swift-storage-0_031a6182-897b-45d0-a48f-a9473aebe554/swift-recon-cron/0.log"
Jan 28 12:56:22 crc kubenswrapper[4685]: I0128 12:56:22.978300 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q_9444ee44-142c-4fad-a9c3-9ab30638416d/util/0.log"
Jan 28 12:56:23 crc kubenswrapper[4685]: I0128 12:56:23.157188 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q_9444ee44-142c-4fad-a9c3-9ab30638416d/pull/0.log"
Jan 28 12:56:23 crc kubenswrapper[4685]: I0128 12:56:23.196349 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q_9444ee44-142c-4fad-a9c3-9ab30638416d/util/0.log"
Jan 28 12:56:23 crc kubenswrapper[4685]: I0128 12:56:23.198182 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q_9444ee44-142c-4fad-a9c3-9ab30638416d/pull/0.log"
Jan 28 12:56:23 crc kubenswrapper[4685]: I0128 12:56:23.304462 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q_9444ee44-142c-4fad-a9c3-9ab30638416d/util/0.log"
Jan 28 12:56:23 crc kubenswrapper[4685]: I0128 12:56:23.320572 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q_9444ee44-142c-4fad-a9c3-9ab30638416d/pull/0.log"
Jan 28 12:56:23 crc kubenswrapper[4685]: I0128 12:56:23.368081 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcps44q_9444ee44-142c-4fad-a9c3-9ab30638416d/extract/0.log"
Jan 28 12:56:23 crc kubenswrapper[4685]: I0128 12:56:23.470497 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9ww8x_f21418ca-c434-4aec-95f1-6be063d5f927/extract-utilities/0.log"
Jan 28 12:56:23 crc kubenswrapper[4685]: I0128 12:56:23.639258 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9ww8x_f21418ca-c434-4aec-95f1-6be063d5f927/extract-content/0.log"
Jan 28 12:56:23 crc kubenswrapper[4685]: I0128 12:56:23.658638 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9ww8x_f21418ca-c434-4aec-95f1-6be063d5f927/extract-content/0.log"
Jan 28 12:56:23 crc kubenswrapper[4685]: I0128 12:56:23.668012 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9ww8x_f21418ca-c434-4aec-95f1-6be063d5f927/extract-utilities/0.log"
Jan 28 12:56:23 crc kubenswrapper[4685]: I0128 12:56:23.814645 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9ww8x_f21418ca-c434-4aec-95f1-6be063d5f927/extract-content/0.log"
Jan 28 12:56:23 crc kubenswrapper[4685]: I0128 12:56:23.823704 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9ww8x_f21418ca-c434-4aec-95f1-6be063d5f927/extract-utilities/0.log"
Jan 28 12:56:24 crc kubenswrapper[4685]: I0128 12:56:24.011473 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-jkngf_04f395cb-91b3-4fc2-a434-1db8e1a2d32f/extract-utilities/0.log"
Jan 28 12:56:24 crc kubenswrapper[4685]: I0128 12:56:24.226734 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-9ww8x_f21418ca-c434-4aec-95f1-6be063d5f927/registry-server/0.log"
Jan 28 12:56:24 crc kubenswrapper[4685]: I0128 12:56:24.267607 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-jkngf_04f395cb-91b3-4fc2-a434-1db8e1a2d32f/extract-utilities/0.log"
Jan 28 12:56:24 crc kubenswrapper[4685]: I0128 12:56:24.280649 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-jkngf_04f395cb-91b3-4fc2-a434-1db8e1a2d32f/extract-content/0.log"
Jan 28 12:56:24 crc kubenswrapper[4685]: I0128 12:56:24.297212 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-jkngf_04f395cb-91b3-4fc2-a434-1db8e1a2d32f/extract-content/0.log"
Jan 28 12:56:24 crc kubenswrapper[4685]: I0128 12:56:24.490100 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-jkngf_04f395cb-91b3-4fc2-a434-1db8e1a2d32f/extract-utilities/0.log"
Jan 28 12:56:24 crc kubenswrapper[4685]: I0128 12:56:24.537297 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-jkngf_04f395cb-91b3-4fc2-a434-1db8e1a2d32f/extract-content/0.log"
Jan 28 12:56:24 crc kubenswrapper[4685]: I0128 12:56:24.822961 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-vf9sl_94dd5e0c-ac23-490c-a1da-3e08ca35ecda/extract-utilities/0.log"
Jan 28 12:56:24 crc kubenswrapper[4685]: I0128 12:56:24.859841 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-7qzcb_e4297ea9-021a-467f-b78b-89ba6ae5a6b1/marketplace-operator/0.log"
Jan 28 12:56:24 crc kubenswrapper[4685]: I0128 12:56:24.991624 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-jkngf_04f395cb-91b3-4fc2-a434-1db8e1a2d32f/registry-server/0.log"
Jan 28 12:56:25 crc kubenswrapper[4685]: I0128 12:56:25.040654 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-vf9sl_94dd5e0c-ac23-490c-a1da-3e08ca35ecda/extract-utilities/0.log"
Jan 28 12:56:25 crc kubenswrapper[4685]: I0128 12:56:25.064347 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-vf9sl_94dd5e0c-ac23-490c-a1da-3e08ca35ecda/extract-content/0.log"
Jan 28 12:56:25 crc kubenswrapper[4685]: I0128 12:56:25.074888 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-vf9sl_94dd5e0c-ac23-490c-a1da-3e08ca35ecda/extract-content/0.log"
Jan 28 12:56:25 crc kubenswrapper[4685]: I0128 12:56:25.231919 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-vf9sl_94dd5e0c-ac23-490c-a1da-3e08ca35ecda/extract-utilities/0.log"
Jan 28 12:56:25 crc kubenswrapper[4685]: I0128 12:56:25.237972 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-vf9sl_94dd5e0c-ac23-490c-a1da-3e08ca35ecda/extract-content/0.log"
Jan 28 12:56:25 crc kubenswrapper[4685]: I0128 12:56:25.375571 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-vf9sl_94dd5e0c-ac23-490c-a1da-3e08ca35ecda/registry-server/0.log"
Jan 28 12:56:25 crc kubenswrapper[4685]: I0128 12:56:25.434515 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bfldq_65420e05-082f-4ad4-94f9-01079a28394b/extract-utilities/0.log"
Jan 28 12:56:25 crc kubenswrapper[4685]: I0128 12:56:25.588492 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bfldq_65420e05-082f-4ad4-94f9-01079a28394b/extract-content/0.log"
Jan 28 12:56:25 crc kubenswrapper[4685]: I0128 12:56:25.594933 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bfldq_65420e05-082f-4ad4-94f9-01079a28394b/extract-content/0.log"
Jan 28 12:56:25 crc kubenswrapper[4685]: I0128 12:56:25.598286 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bfldq_65420e05-082f-4ad4-94f9-01079a28394b/extract-utilities/0.log"
Jan 28 12:56:25 crc kubenswrapper[4685]: I0128 12:56:25.726695 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bfldq_65420e05-082f-4ad4-94f9-01079a28394b/extract-utilities/0.log"
Jan 28 12:56:25 crc kubenswrapper[4685]: I0128 12:56:25.792021 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bfldq_65420e05-082f-4ad4-94f9-01079a28394b/extract-content/0.log"
Jan 28 12:56:25 crc kubenswrapper[4685]: I0128 12:56:25.870159 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bfldq_65420e05-082f-4ad4-94f9-01079a28394b/registry-server/0.log"
Jan 28 12:56:53 crc kubenswrapper[4685]: I0128 12:56:53.042866 4685 scope.go:117] "RemoveContainer" containerID="6b28877cc2816532cbc2020dea0cd2ab4532b6ec08d813eca9589228f7ae4fce"
Jan 28 12:56:53 crc kubenswrapper[4685]: I0128 12:56:53.287094 4685 scope.go:117] "RemoveContainer" containerID="3c2bee5a5a7b328e99be338aaf06f86b8d50d5f12e3143178eca52eb0cb2f202"
Jan 28 12:56:53 crc kubenswrapper[4685]: I0128 12:56:53.317363 4685 scope.go:117] "RemoveContainer" containerID="9d8e4d57e3173ecb8767869a6018812a51c2c816324f2d83ec5098504d828e30"
Jan 28 12:56:53 crc kubenswrapper[4685]: I0128 12:56:53.415445 4685 scope.go:117] "RemoveContainer" containerID="1ed467b0f59b8213320e6ec49be1c4fc34787d685c8075ca1cc281174d53cadb"
Jan 28 12:57:00 crc kubenswrapper[4685]: I0128 12:57:00.301583 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2bz9l"]
Jan 28 12:57:00 crc kubenswrapper[4685]: I0128 12:57:00.303061 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2bz9l"
Jan 28 12:57:00 crc kubenswrapper[4685]: I0128 12:57:00.315752 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2bz9l"]
Jan 28 12:57:00 crc kubenswrapper[4685]: I0128 12:57:00.370412 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c9afe45-e5b9-4545-8fa7-33ff058f560d-utilities\") pod \"community-operators-2bz9l\" (UID: \"4c9afe45-e5b9-4545-8fa7-33ff058f560d\") " pod="openshift-marketplace/community-operators-2bz9l"
Jan 28 12:57:00 crc kubenswrapper[4685]: I0128 12:57:00.370510 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75w29\" (UniqueName: \"kubernetes.io/projected/4c9afe45-e5b9-4545-8fa7-33ff058f560d-kube-api-access-75w29\") pod \"community-operators-2bz9l\" (UID: \"4c9afe45-e5b9-4545-8fa7-33ff058f560d\") " pod="openshift-marketplace/community-operators-2bz9l"
Jan 28 12:57:00 crc kubenswrapper[4685]: I0128 12:57:00.370548 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c9afe45-e5b9-4545-8fa7-33ff058f560d-catalog-content\") pod \"community-operators-2bz9l\" (UID: \"4c9afe45-e5b9-4545-8fa7-33ff058f560d\") " pod="openshift-marketplace/community-operators-2bz9l"
Jan 28 12:57:00 crc kubenswrapper[4685]: I0128 12:57:00.472507 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75w29\" (UniqueName: \"kubernetes.io/projected/4c9afe45-e5b9-4545-8fa7-33ff058f560d-kube-api-access-75w29\") pod \"community-operators-2bz9l\" (UID: \"4c9afe45-e5b9-4545-8fa7-33ff058f560d\") " pod="openshift-marketplace/community-operators-2bz9l"
Jan 28 12:57:00 crc kubenswrapper[4685]: I0128 12:57:00.472571 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c9afe45-e5b9-4545-8fa7-33ff058f560d-catalog-content\") pod \"community-operators-2bz9l\" (UID: \"4c9afe45-e5b9-4545-8fa7-33ff058f560d\") " pod="openshift-marketplace/community-operators-2bz9l"
Jan 28 12:57:00 crc kubenswrapper[4685]: I0128 12:57:00.472671 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c9afe45-e5b9-4545-8fa7-33ff058f560d-utilities\") pod \"community-operators-2bz9l\" (UID: \"4c9afe45-e5b9-4545-8fa7-33ff058f560d\") " pod="openshift-marketplace/community-operators-2bz9l"
Jan 28 12:57:00 crc kubenswrapper[4685]: I0128 12:57:00.473127 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c9afe45-e5b9-4545-8fa7-33ff058f560d-catalog-content\") pod \"community-operators-2bz9l\" (UID: \"4c9afe45-e5b9-4545-8fa7-33ff058f560d\") " pod="openshift-marketplace/community-operators-2bz9l"
Jan 28 12:57:00 crc kubenswrapper[4685]: I0128 12:57:00.473495 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c9afe45-e5b9-4545-8fa7-33ff058f560d-utilities\") pod \"community-operators-2bz9l\" (UID: \"4c9afe45-e5b9-4545-8fa7-33ff058f560d\") " pod="openshift-marketplace/community-operators-2bz9l"
Jan 28 12:57:00 crc kubenswrapper[4685]: I0128 12:57:00.491496 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75w29\" (UniqueName: \"kubernetes.io/projected/4c9afe45-e5b9-4545-8fa7-33ff058f560d-kube-api-access-75w29\") pod \"community-operators-2bz9l\" (UID: \"4c9afe45-e5b9-4545-8fa7-33ff058f560d\") " pod="openshift-marketplace/community-operators-2bz9l"
Jan 28 12:57:00 crc kubenswrapper[4685]: I0128 12:57:00.628435 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2bz9l"
Jan 28 12:57:00 crc kubenswrapper[4685]: I0128 12:57:00.936848 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2bz9l"]
Jan 28 12:57:01 crc kubenswrapper[4685]: I0128 12:57:01.681742 4685 generic.go:334] "Generic (PLEG): container finished" podID="4c9afe45-e5b9-4545-8fa7-33ff058f560d" containerID="8b027c0e0a51357de89fe2a0b0cea63b3f593bed6553da3af19d720420c673f0" exitCode=0
Jan 28 12:57:01 crc kubenswrapper[4685]: I0128 12:57:01.681832 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2bz9l" event={"ID":"4c9afe45-e5b9-4545-8fa7-33ff058f560d","Type":"ContainerDied","Data":"8b027c0e0a51357de89fe2a0b0cea63b3f593bed6553da3af19d720420c673f0"}
Jan 28 12:57:01 crc kubenswrapper[4685]: I0128 12:57:01.682022 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2bz9l" event={"ID":"4c9afe45-e5b9-4545-8fa7-33ff058f560d","Type":"ContainerStarted","Data":"8815bd965b005d0cc44feb205f27bdba8b018f1e431d52fb0c5bb3ca2bd02119"}
Jan 28 12:57:04 crc kubenswrapper[4685]: I0128 12:57:04.707386 4685 generic.go:334] "Generic (PLEG): container finished" podID="4c9afe45-e5b9-4545-8fa7-33ff058f560d" containerID="1e2b30821035d768d81a45e8fc7846672bc7da6991eddadfc8582ba3383d2459" exitCode=0
Jan 28 12:57:04 crc kubenswrapper[4685]: I0128 12:57:04.707444 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2bz9l" event={"ID":"4c9afe45-e5b9-4545-8fa7-33ff058f560d","Type":"ContainerDied","Data":"1e2b30821035d768d81a45e8fc7846672bc7da6991eddadfc8582ba3383d2459"}
Jan 28 12:57:06 crc kubenswrapper[4685]: I0128 12:57:06.743468 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2bz9l" event={"ID":"4c9afe45-e5b9-4545-8fa7-33ff058f560d","Type":"ContainerStarted","Data":"668af4a2dac5c2880bbb87f00cc04fa4156ffb03ac5bfe8e6bf201d24127827d"}
Jan 28 12:57:06 crc kubenswrapper[4685]: I0128 12:57:06.766257 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2bz9l" podStartSLOduration=2.754699175 podStartE2EDuration="6.766241896s" podCreationTimestamp="2026-01-28 12:57:00 +0000 UTC" firstStartedPulling="2026-01-28 12:57:01.683873281 +0000 UTC m=+2172.771287156" lastFinishedPulling="2026-01-28 12:57:05.695416002 +0000 UTC m=+2176.782829877" observedRunningTime="2026-01-28 12:57:06.764357913 +0000 UTC m=+2177.851771758" watchObservedRunningTime="2026-01-28 12:57:06.766241896 +0000 UTC m=+2177.853655731"
Jan 28 12:57:10 crc kubenswrapper[4685]: I0128 12:57:10.628559 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2bz9l"
Jan 28 12:57:10 crc kubenswrapper[4685]: I0128 12:57:10.630034 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2bz9l"
Jan 28 12:57:10 crc kubenswrapper[4685]: I0128 12:57:10.679834 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2bz9l"
Jan 28 12:57:11 crc kubenswrapper[4685]: I0128 12:57:11.829045 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2bz9l"
Jan 28 12:57:12 crc kubenswrapper[4685]: I0128 12:57:12.884659 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2bz9l"]
Jan 28 12:57:13 crc kubenswrapper[4685]: I0128 12:57:13.796823 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-2bz9l" podUID="4c9afe45-e5b9-4545-8fa7-33ff058f560d" containerName="registry-server" containerID="cri-o://668af4a2dac5c2880bbb87f00cc04fa4156ffb03ac5bfe8e6bf201d24127827d" gracePeriod=2
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.214879 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2bz9l"
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.322260 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c9afe45-e5b9-4545-8fa7-33ff058f560d-utilities\") pod \"4c9afe45-e5b9-4545-8fa7-33ff058f560d\" (UID: \"4c9afe45-e5b9-4545-8fa7-33ff058f560d\") "
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.322709 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c9afe45-e5b9-4545-8fa7-33ff058f560d-catalog-content\") pod \"4c9afe45-e5b9-4545-8fa7-33ff058f560d\" (UID: \"4c9afe45-e5b9-4545-8fa7-33ff058f560d\") "
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.322872 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-75w29\" (UniqueName: \"kubernetes.io/projected/4c9afe45-e5b9-4545-8fa7-33ff058f560d-kube-api-access-75w29\") pod \"4c9afe45-e5b9-4545-8fa7-33ff058f560d\" (UID: \"4c9afe45-e5b9-4545-8fa7-33ff058f560d\") "
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.323540 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c9afe45-e5b9-4545-8fa7-33ff058f560d-utilities" (OuterVolumeSpecName: "utilities") pod "4c9afe45-e5b9-4545-8fa7-33ff058f560d" (UID: "4c9afe45-e5b9-4545-8fa7-33ff058f560d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.324103 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c9afe45-e5b9-4545-8fa7-33ff058f560d-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.327768 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c9afe45-e5b9-4545-8fa7-33ff058f560d-kube-api-access-75w29" (OuterVolumeSpecName: "kube-api-access-75w29") pod "4c9afe45-e5b9-4545-8fa7-33ff058f560d" (UID: "4c9afe45-e5b9-4545-8fa7-33ff058f560d"). InnerVolumeSpecName "kube-api-access-75w29". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.384426 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c9afe45-e5b9-4545-8fa7-33ff058f560d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4c9afe45-e5b9-4545-8fa7-33ff058f560d" (UID: "4c9afe45-e5b9-4545-8fa7-33ff058f560d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.425334 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c9afe45-e5b9-4545-8fa7-33ff058f560d-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.425374 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-75w29\" (UniqueName: \"kubernetes.io/projected/4c9afe45-e5b9-4545-8fa7-33ff058f560d-kube-api-access-75w29\") on node \"crc\" DevicePath \"\""
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.807549 4685 generic.go:334] "Generic (PLEG): container finished" podID="4c9afe45-e5b9-4545-8fa7-33ff058f560d" containerID="668af4a2dac5c2880bbb87f00cc04fa4156ffb03ac5bfe8e6bf201d24127827d" exitCode=0
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.807607 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2bz9l" event={"ID":"4c9afe45-e5b9-4545-8fa7-33ff058f560d","Type":"ContainerDied","Data":"668af4a2dac5c2880bbb87f00cc04fa4156ffb03ac5bfe8e6bf201d24127827d"}
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.807624 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2bz9l"
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.807645 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2bz9l" event={"ID":"4c9afe45-e5b9-4545-8fa7-33ff058f560d","Type":"ContainerDied","Data":"8815bd965b005d0cc44feb205f27bdba8b018f1e431d52fb0c5bb3ca2bd02119"}
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.807669 4685 scope.go:117] "RemoveContainer" containerID="668af4a2dac5c2880bbb87f00cc04fa4156ffb03ac5bfe8e6bf201d24127827d"
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.830163 4685 scope.go:117] "RemoveContainer" containerID="1e2b30821035d768d81a45e8fc7846672bc7da6991eddadfc8582ba3383d2459"
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.832083 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2bz9l"]
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.839542 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-2bz9l"]
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.853020 4685 scope.go:117] "RemoveContainer" containerID="8b027c0e0a51357de89fe2a0b0cea63b3f593bed6553da3af19d720420c673f0"
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.885248 4685 scope.go:117] "RemoveContainer" containerID="668af4a2dac5c2880bbb87f00cc04fa4156ffb03ac5bfe8e6bf201d24127827d"
Jan 28 12:57:14 crc kubenswrapper[4685]: E0128 12:57:14.885730 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"668af4a2dac5c2880bbb87f00cc04fa4156ffb03ac5bfe8e6bf201d24127827d\": container with ID starting with 668af4a2dac5c2880bbb87f00cc04fa4156ffb03ac5bfe8e6bf201d24127827d not found: ID does not exist" containerID="668af4a2dac5c2880bbb87f00cc04fa4156ffb03ac5bfe8e6bf201d24127827d"
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.885767 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"668af4a2dac5c2880bbb87f00cc04fa4156ffb03ac5bfe8e6bf201d24127827d"} err="failed to get container status \"668af4a2dac5c2880bbb87f00cc04fa4156ffb03ac5bfe8e6bf201d24127827d\": rpc error: code = NotFound desc = could not find container \"668af4a2dac5c2880bbb87f00cc04fa4156ffb03ac5bfe8e6bf201d24127827d\": container with ID starting with 668af4a2dac5c2880bbb87f00cc04fa4156ffb03ac5bfe8e6bf201d24127827d not found: ID does not exist"
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.885795 4685 scope.go:117] "RemoveContainer" containerID="1e2b30821035d768d81a45e8fc7846672bc7da6991eddadfc8582ba3383d2459"
Jan 28 12:57:14 crc kubenswrapper[4685]: E0128 12:57:14.886075 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e2b30821035d768d81a45e8fc7846672bc7da6991eddadfc8582ba3383d2459\": container with ID starting with 1e2b30821035d768d81a45e8fc7846672bc7da6991eddadfc8582ba3383d2459 not found: ID does not exist" containerID="1e2b30821035d768d81a45e8fc7846672bc7da6991eddadfc8582ba3383d2459"
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.886096 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e2b30821035d768d81a45e8fc7846672bc7da6991eddadfc8582ba3383d2459"} err="failed to get container status \"1e2b30821035d768d81a45e8fc7846672bc7da6991eddadfc8582ba3383d2459\": rpc error: code = NotFound desc = could not find container \"1e2b30821035d768d81a45e8fc7846672bc7da6991eddadfc8582ba3383d2459\": container with ID starting with 1e2b30821035d768d81a45e8fc7846672bc7da6991eddadfc8582ba3383d2459 not found: ID does not exist"
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.886113 4685 scope.go:117] "RemoveContainer" containerID="8b027c0e0a51357de89fe2a0b0cea63b3f593bed6553da3af19d720420c673f0"
Jan 28 12:57:14 crc kubenswrapper[4685]: E0128 12:57:14.886338 4685 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b027c0e0a51357de89fe2a0b0cea63b3f593bed6553da3af19d720420c673f0\": container with ID starting with 8b027c0e0a51357de89fe2a0b0cea63b3f593bed6553da3af19d720420c673f0 not found: ID does not exist" containerID="8b027c0e0a51357de89fe2a0b0cea63b3f593bed6553da3af19d720420c673f0"
Jan 28 12:57:14 crc kubenswrapper[4685]: I0128 12:57:14.886364 4685 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b027c0e0a51357de89fe2a0b0cea63b3f593bed6553da3af19d720420c673f0"} err="failed to get container status \"8b027c0e0a51357de89fe2a0b0cea63b3f593bed6553da3af19d720420c673f0\": rpc error: code = NotFound desc = could not find container \"8b027c0e0a51357de89fe2a0b0cea63b3f593bed6553da3af19d720420c673f0\": container with ID starting with 8b027c0e0a51357de89fe2a0b0cea63b3f593bed6553da3af19d720420c673f0 not found: ID does not exist"
Jan 28 12:57:16 crc kubenswrapper[4685]: I0128 12:57:16.555985 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c9afe45-e5b9-4545-8fa7-33ff058f560d" path="/var/lib/kubelet/pods/4c9afe45-e5b9-4545-8fa7-33ff058f560d/volumes"
Jan 28 12:57:41 crc kubenswrapper[4685]: I0128 12:57:41.030988 4685 generic.go:334] "Generic (PLEG): container finished" podID="5d3926de-6c03-4c49-a020-28831263dbe2" containerID="02e3ff53ab139c6b21b0c55d499c6aa07cf33763f28f1d8be3a54bd84fed80f3" exitCode=0
Jan 28 12:57:41 crc kubenswrapper[4685]: I0128 12:57:41.031070 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-wqzs5/must-gather-cpdsl" event={"ID":"5d3926de-6c03-4c49-a020-28831263dbe2","Type":"ContainerDied","Data":"02e3ff53ab139c6b21b0c55d499c6aa07cf33763f28f1d8be3a54bd84fed80f3"}
Jan 28 12:57:41 crc kubenswrapper[4685]: I0128 12:57:41.032036 4685 scope.go:117] "RemoveContainer" containerID="02e3ff53ab139c6b21b0c55d499c6aa07cf33763f28f1d8be3a54bd84fed80f3"
Jan 28 12:57:41 crc kubenswrapper[4685]: I0128 12:57:41.559089 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-wqzs5_must-gather-cpdsl_5d3926de-6c03-4c49-a020-28831263dbe2/gather/0.log"
Jan 28 12:57:48 crc kubenswrapper[4685]: I0128 12:57:48.316243 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-wqzs5/must-gather-cpdsl"]
Jan 28 12:57:48 crc kubenswrapper[4685]: I0128 12:57:48.317160 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-wqzs5/must-gather-cpdsl" podUID="5d3926de-6c03-4c49-a020-28831263dbe2" containerName="copy" containerID="cri-o://37927dfa4952b22ccdded8b02066406a63f004fafd5c8344727d21f6d3063e88" gracePeriod=2
Jan 28 12:57:48 crc kubenswrapper[4685]: I0128 12:57:48.324263 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-wqzs5/must-gather-cpdsl"]
Jan 28 12:57:49 crc kubenswrapper[4685]: I0128 12:57:49.094457 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-wqzs5_must-gather-cpdsl_5d3926de-6c03-4c49-a020-28831263dbe2/copy/0.log"
Jan 28 12:57:49 crc kubenswrapper[4685]: I0128 12:57:49.095285 4685 generic.go:334] "Generic (PLEG): container finished" podID="5d3926de-6c03-4c49-a020-28831263dbe2" containerID="37927dfa4952b22ccdded8b02066406a63f004fafd5c8344727d21f6d3063e88" exitCode=143
Jan 28 12:57:49 crc kubenswrapper[4685]: I0128 12:57:49.345366 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-wqzs5_must-gather-cpdsl_5d3926de-6c03-4c49-a020-28831263dbe2/copy/0.log"
Jan 28 12:57:49 crc kubenswrapper[4685]: I0128 12:57:49.345860 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-wqzs5/must-gather-cpdsl"
Jan 28 12:57:49 crc kubenswrapper[4685]: I0128 12:57:49.502055 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z4dsd\" (UniqueName: \"kubernetes.io/projected/5d3926de-6c03-4c49-a020-28831263dbe2-kube-api-access-z4dsd\") pod \"5d3926de-6c03-4c49-a020-28831263dbe2\" (UID: \"5d3926de-6c03-4c49-a020-28831263dbe2\") "
Jan 28 12:57:49 crc kubenswrapper[4685]: I0128 12:57:49.502195 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/5d3926de-6c03-4c49-a020-28831263dbe2-must-gather-output\") pod \"5d3926de-6c03-4c49-a020-28831263dbe2\" (UID: \"5d3926de-6c03-4c49-a020-28831263dbe2\") "
Jan 28 12:57:49 crc kubenswrapper[4685]: I0128 12:57:49.510151 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d3926de-6c03-4c49-a020-28831263dbe2-kube-api-access-z4dsd" (OuterVolumeSpecName: "kube-api-access-z4dsd") pod "5d3926de-6c03-4c49-a020-28831263dbe2" (UID: "5d3926de-6c03-4c49-a020-28831263dbe2"). InnerVolumeSpecName "kube-api-access-z4dsd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 12:57:49 crc kubenswrapper[4685]: I0128 12:57:49.591338 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d3926de-6c03-4c49-a020-28831263dbe2-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "5d3926de-6c03-4c49-a020-28831263dbe2" (UID: "5d3926de-6c03-4c49-a020-28831263dbe2"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 12:57:49 crc kubenswrapper[4685]: I0128 12:57:49.604367 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z4dsd\" (UniqueName: \"kubernetes.io/projected/5d3926de-6c03-4c49-a020-28831263dbe2-kube-api-access-z4dsd\") on node \"crc\" DevicePath \"\""
Jan 28 12:57:49 crc kubenswrapper[4685]: I0128 12:57:49.604422 4685 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/5d3926de-6c03-4c49-a020-28831263dbe2-must-gather-output\") on node \"crc\" DevicePath \"\""
Jan 28 12:57:50 crc kubenswrapper[4685]: I0128 12:57:50.102661 4685 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-wqzs5_must-gather-cpdsl_5d3926de-6c03-4c49-a020-28831263dbe2/copy/0.log"
Jan 28 12:57:50 crc kubenswrapper[4685]: I0128 12:57:50.103505 4685 scope.go:117] "RemoveContainer" containerID="37927dfa4952b22ccdded8b02066406a63f004fafd5c8344727d21f6d3063e88"
Jan 28 12:57:50 crc kubenswrapper[4685]: I0128 12:57:50.103587 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-wqzs5/must-gather-cpdsl"
Jan 28 12:57:50 crc kubenswrapper[4685]: I0128 12:57:50.126517 4685 scope.go:117] "RemoveContainer" containerID="02e3ff53ab139c6b21b0c55d499c6aa07cf33763f28f1d8be3a54bd84fed80f3"
Jan 28 12:57:50 crc kubenswrapper[4685]: I0128 12:57:50.568759 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d3926de-6c03-4c49-a020-28831263dbe2" path="/var/lib/kubelet/pods/5d3926de-6c03-4c49-a020-28831263dbe2/volumes"
Jan 28 12:57:53 crc kubenswrapper[4685]: I0128 12:57:53.517545 4685 scope.go:117] "RemoveContainer" containerID="b71f84ee8acf7ef3aecd638540bb95af6e6943c74eeac4b65a07e35cf75c75cb"
Jan 28 12:57:53 crc kubenswrapper[4685]: I0128 12:57:53.540872 4685 scope.go:117] "RemoveContainer" containerID="3216208e75d8c67b16582c129fd898ea5e91d194e129ec283e6e14696c9b0b82"
Jan 28 12:57:53 crc kubenswrapper[4685]: I0128 12:57:53.595897 4685 scope.go:117] "RemoveContainer" containerID="ecb09d2dba97e99a4739a4f646ed9c58d2d9f3c2872d2996a98add67c8cd24b0"
Jan 28 12:57:53 crc kubenswrapper[4685]: I0128 12:57:53.612982 4685 scope.go:117] "RemoveContainer" containerID="cade24f3843ef21761779849be56f06f2eebe51e3ef70f09a45b83c7ece0b5bd"
Jan 28 12:57:57 crc kubenswrapper[4685]: I0128 12:57:57.069688 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 12:57:57 crc kubenswrapper[4685]: I0128 12:57:57.070288 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 12:58:27 crc kubenswrapper[4685]: I0128 12:58:27.070301 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 12:58:27 crc kubenswrapper[4685]: I0128 12:58:27.070923 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 12:58:54 crc kubenswrapper[4685]: I0128 12:58:54.151429 4685 scope.go:117] "RemoveContainer" containerID="982196cf9d1430c23f61c9f6a591d2e502a122f68dd3d208f2823fe1bcd16495"
Jan 28 12:58:54 crc kubenswrapper[4685]: I0128 12:58:54.177300 4685 scope.go:117] "RemoveContainer" containerID="8a9f41f7ac4d14cefb86b26759a10f2a36d29a2f7fb52aa5f9f09af757385cf7"
Jan 28 12:58:54 crc kubenswrapper[4685]: I0128 12:58:54.225016 4685 scope.go:117] "RemoveContainer" containerID="8fb9fcad1766edb8d5495cc4d91f9a6d2bc44d76fba4636a9b3d7228538ef936"
Jan 28 12:58:54 crc kubenswrapper[4685]: I0128 12:58:54.276781 4685 scope.go:117] "RemoveContainer" containerID="7775e5f2117fc7c2925d3467035960efedd8628cba956f1b78ab9d4952f337b5"
Jan 28 12:58:57 crc kubenswrapper[4685]: I0128 12:58:57.069372 4685 patch_prober.go:28] interesting pod/machine-config-daemon-h5wpv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 12:58:57 crc kubenswrapper[4685]: I0128 12:58:57.069802 4685 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 12:58:57 crc kubenswrapper[4685]: I0128 12:58:57.069866 4685 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv"
Jan 28 12:58:57 crc kubenswrapper[4685]: I0128 12:58:57.070618 4685 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2641a0dab6f217bc7e61951ba9c7e3b3dd19fe7452d4002ac6c0710cf0137283"} pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 28 12:58:57 crc kubenswrapper[4685]: I0128 12:58:57.070688 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerName="machine-config-daemon" containerID="cri-o://2641a0dab6f217bc7e61951ba9c7e3b3dd19fe7452d4002ac6c0710cf0137283" gracePeriod=600
Jan 28 12:58:57 crc kubenswrapper[4685]: E0128 12:58:57.204842 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1"
Jan 28 12:58:57 crc kubenswrapper[4685]: I0128 12:58:57.582057 4685 generic.go:334] "Generic (PLEG): container finished" podID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" containerID="2641a0dab6f217bc7e61951ba9c7e3b3dd19fe7452d4002ac6c0710cf0137283" exitCode=0
Jan 28 12:58:57 crc kubenswrapper[4685]: I0128 12:58:57.582115 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" event={"ID":"c708b01f-11f7-4c21-86c4-92ac3c7e9cb1","Type":"ContainerDied","Data":"2641a0dab6f217bc7e61951ba9c7e3b3dd19fe7452d4002ac6c0710cf0137283"}
Jan 28 12:58:57 crc kubenswrapper[4685]: I0128 12:58:57.582467 4685 scope.go:117] "RemoveContainer" containerID="be50d96e347898fc35ed6dffd9d30ab7cf139d963b1551cf0b7bbde10cd2350c"
Jan 28 12:58:57 crc kubenswrapper[4685]: I0128 12:58:57.583483 4685 scope.go:117] "RemoveContainer" containerID="2641a0dab6f217bc7e61951ba9c7e3b3dd19fe7452d4002ac6c0710cf0137283"
Jan 28 12:58:57 crc kubenswrapper[4685]: E0128 12:58:57.584098 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1"
Jan 28 12:58:59 crc kubenswrapper[4685]: I0128 12:58:59.789271 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nngnk"]
Jan 28 12:58:59 crc kubenswrapper[4685]: E0128 12:58:59.789610 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d3926de-6c03-4c49-a020-28831263dbe2" containerName="gather"
Jan 28 12:58:59 crc kubenswrapper[4685]: I0128 12:58:59.789625 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d3926de-6c03-4c49-a020-28831263dbe2" containerName="gather"
Jan 28 12:58:59 crc kubenswrapper[4685]: E0128 12:58:59.789652 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c9afe45-e5b9-4545-8fa7-33ff058f560d" containerName="extract-content"
Jan 28 12:58:59 crc kubenswrapper[4685]: I0128 12:58:59.789660 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c9afe45-e5b9-4545-8fa7-33ff058f560d" containerName="extract-content"
Jan 28 12:58:59 crc kubenswrapper[4685]: E0128 12:58:59.789677 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c9afe45-e5b9-4545-8fa7-33ff058f560d" containerName="extract-utilities"
Jan 28 12:58:59 crc kubenswrapper[4685]: I0128 12:58:59.789685 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c9afe45-e5b9-4545-8fa7-33ff058f560d" containerName="extract-utilities"
Jan 28 12:58:59 crc kubenswrapper[4685]: E0128 12:58:59.789697 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c9afe45-e5b9-4545-8fa7-33ff058f560d" containerName="registry-server"
Jan 28 12:58:59 crc kubenswrapper[4685]: I0128 12:58:59.789705 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c9afe45-e5b9-4545-8fa7-33ff058f560d" containerName="registry-server"
Jan 28 12:58:59 crc kubenswrapper[4685]: E0128 12:58:59.789734 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d3926de-6c03-4c49-a020-28831263dbe2" containerName="copy"
Jan 28 12:58:59 crc kubenswrapper[4685]: I0128 12:58:59.789744 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d3926de-6c03-4c49-a020-28831263dbe2" containerName="copy"
Jan 28 12:58:59 crc kubenswrapper[4685]: I0128 12:58:59.789920 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d3926de-6c03-4c49-a020-28831263dbe2" containerName="copy"
Jan 28 12:58:59 crc kubenswrapper[4685]: I0128 12:58:59.789936 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c9afe45-e5b9-4545-8fa7-33ff058f560d" containerName="registry-server"
Jan 28 12:58:59 crc kubenswrapper[4685]: I0128 12:58:59.789947 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d3926de-6c03-4c49-a020-28831263dbe2" containerName="gather"
Jan 28 12:58:59 crc kubenswrapper[4685]: I0128 12:58:59.791095 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nngnk"
Jan 28 12:58:59 crc kubenswrapper[4685]: I0128 12:58:59.803984 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nngnk"]
Jan 28 12:58:59 crc kubenswrapper[4685]: I0128 12:58:59.943531 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb03ca95-0969-4749-86cc-5d2a3c02f33d-catalog-content\") pod \"redhat-operators-nngnk\" (UID: \"cb03ca95-0969-4749-86cc-5d2a3c02f33d\") " pod="openshift-marketplace/redhat-operators-nngnk"
Jan 28 12:58:59 crc kubenswrapper[4685]: I0128 12:58:59.943612 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb03ca95-0969-4749-86cc-5d2a3c02f33d-utilities\") pod \"redhat-operators-nngnk\" (UID: \"cb03ca95-0969-4749-86cc-5d2a3c02f33d\") " pod="openshift-marketplace/redhat-operators-nngnk"
Jan 28 12:58:59 crc kubenswrapper[4685]: I0128 12:58:59.943654 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klb4b\" (UniqueName: \"kubernetes.io/projected/cb03ca95-0969-4749-86cc-5d2a3c02f33d-kube-api-access-klb4b\") pod \"redhat-operators-nngnk\" (UID: \"cb03ca95-0969-4749-86cc-5d2a3c02f33d\") " pod="openshift-marketplace/redhat-operators-nngnk"
Jan 28 12:59:00 crc kubenswrapper[4685]: I0128 12:59:00.044972 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb03ca95-0969-4749-86cc-5d2a3c02f33d-catalog-content\") pod \"redhat-operators-nngnk\" (UID: \"cb03ca95-0969-4749-86cc-5d2a3c02f33d\") " pod="openshift-marketplace/redhat-operators-nngnk"
Jan 28 12:59:00 crc kubenswrapper[4685]: I0128 12:59:00.045036 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb03ca95-0969-4749-86cc-5d2a3c02f33d-utilities\") pod \"redhat-operators-nngnk\" (UID: \"cb03ca95-0969-4749-86cc-5d2a3c02f33d\") " pod="openshift-marketplace/redhat-operators-nngnk"
Jan 28 12:59:00 crc kubenswrapper[4685]: I0128 12:59:00.045064 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klb4b\" (UniqueName: \"kubernetes.io/projected/cb03ca95-0969-4749-86cc-5d2a3c02f33d-kube-api-access-klb4b\") pod \"redhat-operators-nngnk\" (UID: \"cb03ca95-0969-4749-86cc-5d2a3c02f33d\") " pod="openshift-marketplace/redhat-operators-nngnk"
Jan 28 12:59:00 crc kubenswrapper[4685]: I0128 12:59:00.045510 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb03ca95-0969-4749-86cc-5d2a3c02f33d-utilities\") pod \"redhat-operators-nngnk\" (UID: \"cb03ca95-0969-4749-86cc-5d2a3c02f33d\") " pod="openshift-marketplace/redhat-operators-nngnk"
Jan 28 12:59:00 crc kubenswrapper[4685]: I0128 12:59:00.045544 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb03ca95-0969-4749-86cc-5d2a3c02f33d-catalog-content\") pod \"redhat-operators-nngnk\" (UID: \"cb03ca95-0969-4749-86cc-5d2a3c02f33d\") " pod="openshift-marketplace/redhat-operators-nngnk"
Jan 28 12:59:00 crc kubenswrapper[4685]: I0128 12:59:00.073312 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-klb4b\" (UniqueName: \"kubernetes.io/projected/cb03ca95-0969-4749-86cc-5d2a3c02f33d-kube-api-access-klb4b\") pod \"redhat-operators-nngnk\" (UID: \"cb03ca95-0969-4749-86cc-5d2a3c02f33d\") " pod="openshift-marketplace/redhat-operators-nngnk"
Jan 28 12:59:00 crc kubenswrapper[4685]: I0128 12:59:00.110887 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nngnk"
Jan 28 12:59:00 crc kubenswrapper[4685]: I0128 12:59:00.378572 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nngnk"]
Jan 28 12:59:00 crc kubenswrapper[4685]: I0128 12:59:00.607661 4685 generic.go:334] "Generic (PLEG): container finished" podID="cb03ca95-0969-4749-86cc-5d2a3c02f33d" containerID="097dc5366f09bb41caf29de042810c226ecaba564f8fa1f11237879c084b2286" exitCode=0
Jan 28 12:59:00 crc kubenswrapper[4685]: I0128 12:59:00.607709 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nngnk" event={"ID":"cb03ca95-0969-4749-86cc-5d2a3c02f33d","Type":"ContainerDied","Data":"097dc5366f09bb41caf29de042810c226ecaba564f8fa1f11237879c084b2286"}
Jan 28 12:59:00 crc kubenswrapper[4685]: I0128 12:59:00.607734 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nngnk" event={"ID":"cb03ca95-0969-4749-86cc-5d2a3c02f33d","Type":"ContainerStarted","Data":"9b6fe0737bae029e58ed655cc3eb854e96a6bfd4abfa51f81dc495a77f5d14d5"}
Jan 28 12:59:02 crc kubenswrapper[4685]: I0128 12:59:02.628402 4685 generic.go:334] "Generic (PLEG): container finished" podID="cb03ca95-0969-4749-86cc-5d2a3c02f33d" containerID="0355da9d9f17cbd72e72f2283ed92c21b34df0c7776f428412ff64213d0bf846" exitCode=0
Jan 28 12:59:02 crc kubenswrapper[4685]: I0128 12:59:02.628465 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nngnk" event={"ID":"cb03ca95-0969-4749-86cc-5d2a3c02f33d","Type":"ContainerDied","Data":"0355da9d9f17cbd72e72f2283ed92c21b34df0c7776f428412ff64213d0bf846"}
Jan 28 12:59:03 crc kubenswrapper[4685]: I0128 12:59:03.638360 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nngnk" event={"ID":"cb03ca95-0969-4749-86cc-5d2a3c02f33d","Type":"ContainerStarted","Data":"a69acf550730612d2fb7091a1842757f336d5b1b7407439bb5d27cbee91b251c"}
Jan 28 12:59:03 crc kubenswrapper[4685]: I0128 12:59:03.659289 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nngnk"
podStartSLOduration=2.057593492 podStartE2EDuration="4.659269856s" podCreationTimestamp="2026-01-28 12:58:59 +0000 UTC" firstStartedPulling="2026-01-28 12:59:00.609284708 +0000 UTC m=+2291.696698543" lastFinishedPulling="2026-01-28 12:59:03.210961042 +0000 UTC m=+2294.298374907" observedRunningTime="2026-01-28 12:59:03.657147156 +0000 UTC m=+2294.744561011" watchObservedRunningTime="2026-01-28 12:59:03.659269856 +0000 UTC m=+2294.746683691" Jan 28 12:59:10 crc kubenswrapper[4685]: I0128 12:59:10.111392 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nngnk" Jan 28 12:59:10 crc kubenswrapper[4685]: I0128 12:59:10.111842 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nngnk" Jan 28 12:59:10 crc kubenswrapper[4685]: I0128 12:59:10.171737 4685 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nngnk" Jan 28 12:59:10 crc kubenswrapper[4685]: I0128 12:59:10.551119 4685 scope.go:117] "RemoveContainer" containerID="2641a0dab6f217bc7e61951ba9c7e3b3dd19fe7452d4002ac6c0710cf0137283" Jan 28 12:59:10 crc kubenswrapper[4685]: E0128 12:59:10.552424 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:59:10 crc kubenswrapper[4685]: I0128 12:59:10.730667 4685 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nngnk" Jan 28 12:59:10 crc kubenswrapper[4685]: I0128 12:59:10.770386 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nngnk"] Jan 28 12:59:12 crc kubenswrapper[4685]: I0128 12:59:12.706605 4685 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nngnk" podUID="cb03ca95-0969-4749-86cc-5d2a3c02f33d" containerName="registry-server" containerID="cri-o://a69acf550730612d2fb7091a1842757f336d5b1b7407439bb5d27cbee91b251c" gracePeriod=2 Jan 28 12:59:14 crc kubenswrapper[4685]: I0128 12:59:14.725782 4685 generic.go:334] "Generic (PLEG): container finished" podID="cb03ca95-0969-4749-86cc-5d2a3c02f33d" containerID="a69acf550730612d2fb7091a1842757f336d5b1b7407439bb5d27cbee91b251c" exitCode=0 Jan 28 12:59:14 crc kubenswrapper[4685]: I0128 12:59:14.725847 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nngnk" event={"ID":"cb03ca95-0969-4749-86cc-5d2a3c02f33d","Type":"ContainerDied","Data":"a69acf550730612d2fb7091a1842757f336d5b1b7407439bb5d27cbee91b251c"} Jan 28 12:59:15 crc kubenswrapper[4685]: I0128 12:59:15.192714 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nngnk" Jan 28 12:59:15 crc kubenswrapper[4685]: I0128 12:59:15.282127 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb03ca95-0969-4749-86cc-5d2a3c02f33d-utilities\") pod \"cb03ca95-0969-4749-86cc-5d2a3c02f33d\" (UID: \"cb03ca95-0969-4749-86cc-5d2a3c02f33d\") " Jan 28 12:59:15 crc kubenswrapper[4685]: I0128 12:59:15.282205 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb03ca95-0969-4749-86cc-5d2a3c02f33d-catalog-content\") pod \"cb03ca95-0969-4749-86cc-5d2a3c02f33d\" (UID: \"cb03ca95-0969-4749-86cc-5d2a3c02f33d\") " Jan 28 12:59:15 crc kubenswrapper[4685]: I0128 12:59:15.282228 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-klb4b\" (UniqueName: \"kubernetes.io/projected/cb03ca95-0969-4749-86cc-5d2a3c02f33d-kube-api-access-klb4b\") pod \"cb03ca95-0969-4749-86cc-5d2a3c02f33d\" (UID: \"cb03ca95-0969-4749-86cc-5d2a3c02f33d\") " Jan 28 12:59:15 crc kubenswrapper[4685]: I0128 12:59:15.283533 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb03ca95-0969-4749-86cc-5d2a3c02f33d-utilities" (OuterVolumeSpecName: "utilities") pod "cb03ca95-0969-4749-86cc-5d2a3c02f33d" (UID: "cb03ca95-0969-4749-86cc-5d2a3c02f33d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4685]: I0128 12:59:15.287221 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb03ca95-0969-4749-86cc-5d2a3c02f33d-kube-api-access-klb4b" (OuterVolumeSpecName: "kube-api-access-klb4b") pod "cb03ca95-0969-4749-86cc-5d2a3c02f33d" (UID: "cb03ca95-0969-4749-86cc-5d2a3c02f33d"). InnerVolumeSpecName "kube-api-access-klb4b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4685]: I0128 12:59:15.383320 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-klb4b\" (UniqueName: \"kubernetes.io/projected/cb03ca95-0969-4749-86cc-5d2a3c02f33d-kube-api-access-klb4b\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4685]: I0128 12:59:15.383351 4685 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb03ca95-0969-4749-86cc-5d2a3c02f33d-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4685]: I0128 12:59:15.402580 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb03ca95-0969-4749-86cc-5d2a3c02f33d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cb03ca95-0969-4749-86cc-5d2a3c02f33d" (UID: "cb03ca95-0969-4749-86cc-5d2a3c02f33d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 12:59:15 crc kubenswrapper[4685]: I0128 12:59:15.485133 4685 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb03ca95-0969-4749-86cc-5d2a3c02f33d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 12:59:15 crc kubenswrapper[4685]: I0128 12:59:15.735559 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nngnk" event={"ID":"cb03ca95-0969-4749-86cc-5d2a3c02f33d","Type":"ContainerDied","Data":"9b6fe0737bae029e58ed655cc3eb854e96a6bfd4abfa51f81dc495a77f5d14d5"} Jan 28 12:59:15 crc kubenswrapper[4685]: I0128 12:59:15.735618 4685 scope.go:117] "RemoveContainer" containerID="a69acf550730612d2fb7091a1842757f336d5b1b7407439bb5d27cbee91b251c" Jan 28 12:59:15 crc kubenswrapper[4685]: I0128 12:59:15.735630 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nngnk" Jan 28 12:59:15 crc kubenswrapper[4685]: I0128 12:59:15.756676 4685 scope.go:117] "RemoveContainer" containerID="0355da9d9f17cbd72e72f2283ed92c21b34df0c7776f428412ff64213d0bf846" Jan 28 12:59:15 crc kubenswrapper[4685]: I0128 12:59:15.782981 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nngnk"] Jan 28 12:59:15 crc kubenswrapper[4685]: I0128 12:59:15.795827 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nngnk"] Jan 28 12:59:15 crc kubenswrapper[4685]: I0128 12:59:15.801882 4685 scope.go:117] "RemoveContainer" containerID="097dc5366f09bb41caf29de042810c226ecaba564f8fa1f11237879c084b2286" Jan 28 12:59:16 crc kubenswrapper[4685]: I0128 12:59:16.564262 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb03ca95-0969-4749-86cc-5d2a3c02f33d" path="/var/lib/kubelet/pods/cb03ca95-0969-4749-86cc-5d2a3c02f33d/volumes" Jan 28 12:59:25 crc kubenswrapper[4685]: I0128 12:59:25.546371 4685 scope.go:117] "RemoveContainer" containerID="2641a0dab6f217bc7e61951ba9c7e3b3dd19fe7452d4002ac6c0710cf0137283" Jan 28 12:59:25 crc kubenswrapper[4685]: E0128 12:59:25.547453 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:59:39 crc kubenswrapper[4685]: I0128 12:59:39.545945 4685 scope.go:117] "RemoveContainer" containerID="2641a0dab6f217bc7e61951ba9c7e3b3dd19fe7452d4002ac6c0710cf0137283" Jan 28 12:59:39 crc kubenswrapper[4685]: E0128 12:59:39.549091 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:59:51 crc kubenswrapper[4685]: I0128 12:59:51.546162 4685 scope.go:117] "RemoveContainer" containerID="2641a0dab6f217bc7e61951ba9c7e3b3dd19fe7452d4002ac6c0710cf0137283" Jan 28 12:59:51 crc kubenswrapper[4685]: E0128 12:59:51.548105 
4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 12:59:54 crc kubenswrapper[4685]: I0128 12:59:54.349701 4685 scope.go:117] "RemoveContainer" containerID="eed369eda4020a8f30e9d8f0a75514122366f4b4382f0ba32dd1c6a67833a260" Jan 28 13:00:00 crc kubenswrapper[4685]: I0128 13:00:00.141249 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493420-f2ksz"] Jan 28 13:00:00 crc kubenswrapper[4685]: E0128 13:00:00.141856 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb03ca95-0969-4749-86cc-5d2a3c02f33d" containerName="registry-server" Jan 28 13:00:00 crc kubenswrapper[4685]: I0128 13:00:00.141873 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb03ca95-0969-4749-86cc-5d2a3c02f33d" containerName="registry-server" Jan 28 13:00:00 crc kubenswrapper[4685]: E0128 13:00:00.141894 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb03ca95-0969-4749-86cc-5d2a3c02f33d" containerName="extract-utilities" Jan 28 13:00:00 crc kubenswrapper[4685]: I0128 13:00:00.141903 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb03ca95-0969-4749-86cc-5d2a3c02f33d" containerName="extract-utilities" Jan 28 13:00:00 crc kubenswrapper[4685]: E0128 13:00:00.141917 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb03ca95-0969-4749-86cc-5d2a3c02f33d" containerName="extract-content" Jan 28 13:00:00 crc kubenswrapper[4685]: I0128 13:00:00.141925 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb03ca95-0969-4749-86cc-5d2a3c02f33d" containerName="extract-content" Jan 28 13:00:00 crc kubenswrapper[4685]: I0128 13:00:00.142065 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb03ca95-0969-4749-86cc-5d2a3c02f33d" containerName="registry-server" Jan 28 13:00:00 crc kubenswrapper[4685]: I0128 13:00:00.142641 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-f2ksz" Jan 28 13:00:00 crc kubenswrapper[4685]: I0128 13:00:00.144609 4685 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 13:00:00 crc kubenswrapper[4685]: I0128 13:00:00.147023 4685 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 13:00:00 crc kubenswrapper[4685]: I0128 13:00:00.150531 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493420-f2ksz"] Jan 28 13:00:00 crc kubenswrapper[4685]: I0128 13:00:00.328477 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spkbj\" (UniqueName: \"kubernetes.io/projected/68bc2246-25a1-4b4f-bdde-3686ba87ba9a-kube-api-access-spkbj\") pod \"collect-profiles-29493420-f2ksz\" (UID: \"68bc2246-25a1-4b4f-bdde-3686ba87ba9a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-f2ksz" Jan 28 13:00:00 crc kubenswrapper[4685]: I0128 13:00:00.328569 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/68bc2246-25a1-4b4f-bdde-3686ba87ba9a-config-volume\") pod \"collect-profiles-29493420-f2ksz\" (UID: \"68bc2246-25a1-4b4f-bdde-3686ba87ba9a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-f2ksz" Jan 28 13:00:00 crc kubenswrapper[4685]: I0128 13:00:00.328660 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/68bc2246-25a1-4b4f-bdde-3686ba87ba9a-secret-volume\") pod \"collect-profiles-29493420-f2ksz\" (UID: \"68bc2246-25a1-4b4f-bdde-3686ba87ba9a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-f2ksz" Jan 28 13:00:00 crc kubenswrapper[4685]: I0128 13:00:00.429681 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spkbj\" (UniqueName: \"kubernetes.io/projected/68bc2246-25a1-4b4f-bdde-3686ba87ba9a-kube-api-access-spkbj\") pod \"collect-profiles-29493420-f2ksz\" (UID: \"68bc2246-25a1-4b4f-bdde-3686ba87ba9a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-f2ksz" Jan 28 13:00:00 crc kubenswrapper[4685]: I0128 13:00:00.429758 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/68bc2246-25a1-4b4f-bdde-3686ba87ba9a-config-volume\") pod \"collect-profiles-29493420-f2ksz\" (UID: \"68bc2246-25a1-4b4f-bdde-3686ba87ba9a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-f2ksz" Jan 28 13:00:00 crc kubenswrapper[4685]: I0128 13:00:00.429829 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/68bc2246-25a1-4b4f-bdde-3686ba87ba9a-secret-volume\") pod \"collect-profiles-29493420-f2ksz\" (UID: \"68bc2246-25a1-4b4f-bdde-3686ba87ba9a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-f2ksz" Jan 28 13:00:00 crc kubenswrapper[4685]: I0128 13:00:00.431083 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/68bc2246-25a1-4b4f-bdde-3686ba87ba9a-config-volume\") pod 
\"collect-profiles-29493420-f2ksz\" (UID: \"68bc2246-25a1-4b4f-bdde-3686ba87ba9a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-f2ksz" Jan 28 13:00:00 crc kubenswrapper[4685]: I0128 13:00:00.439779 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/68bc2246-25a1-4b4f-bdde-3686ba87ba9a-secret-volume\") pod \"collect-profiles-29493420-f2ksz\" (UID: \"68bc2246-25a1-4b4f-bdde-3686ba87ba9a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-f2ksz" Jan 28 13:00:00 crc kubenswrapper[4685]: I0128 13:00:00.461972 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spkbj\" (UniqueName: \"kubernetes.io/projected/68bc2246-25a1-4b4f-bdde-3686ba87ba9a-kube-api-access-spkbj\") pod \"collect-profiles-29493420-f2ksz\" (UID: \"68bc2246-25a1-4b4f-bdde-3686ba87ba9a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-f2ksz" Jan 28 13:00:00 crc kubenswrapper[4685]: I0128 13:00:00.465706 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-f2ksz" Jan 28 13:00:00 crc kubenswrapper[4685]: I0128 13:00:00.975753 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493420-f2ksz"] Jan 28 13:00:01 crc kubenswrapper[4685]: I0128 13:00:01.084569 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-f2ksz" event={"ID":"68bc2246-25a1-4b4f-bdde-3686ba87ba9a","Type":"ContainerStarted","Data":"fa8ce3d2163f7fe9d12cd574666502858f34c2d9d0226a991ab684757f08bee1"} Jan 28 13:00:02 crc kubenswrapper[4685]: I0128 13:00:02.095505 4685 generic.go:334] "Generic (PLEG): container finished" podID="68bc2246-25a1-4b4f-bdde-3686ba87ba9a" containerID="04b16f369a0e52237b61d15a27655d19e78157bcb2b448f879d8b34e0cd5c900" exitCode=0 Jan 28 13:00:02 crc kubenswrapper[4685]: I0128 13:00:02.095604 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-f2ksz" event={"ID":"68bc2246-25a1-4b4f-bdde-3686ba87ba9a","Type":"ContainerDied","Data":"04b16f369a0e52237b61d15a27655d19e78157bcb2b448f879d8b34e0cd5c900"} Jan 28 13:00:02 crc kubenswrapper[4685]: I0128 13:00:02.545824 4685 scope.go:117] "RemoveContainer" containerID="2641a0dab6f217bc7e61951ba9c7e3b3dd19fe7452d4002ac6c0710cf0137283" Jan 28 13:00:02 crc kubenswrapper[4685]: E0128 13:00:02.546080 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 13:00:03 crc kubenswrapper[4685]: I0128 13:00:03.389127 4685 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-f2ksz" Jan 28 13:00:03 crc kubenswrapper[4685]: I0128 13:00:03.572965 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/68bc2246-25a1-4b4f-bdde-3686ba87ba9a-secret-volume\") pod \"68bc2246-25a1-4b4f-bdde-3686ba87ba9a\" (UID: \"68bc2246-25a1-4b4f-bdde-3686ba87ba9a\") " Jan 28 13:00:03 crc kubenswrapper[4685]: I0128 13:00:03.573049 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/68bc2246-25a1-4b4f-bdde-3686ba87ba9a-config-volume\") pod \"68bc2246-25a1-4b4f-bdde-3686ba87ba9a\" (UID: \"68bc2246-25a1-4b4f-bdde-3686ba87ba9a\") " Jan 28 13:00:03 crc kubenswrapper[4685]: I0128 13:00:03.573091 4685 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-spkbj\" (UniqueName: \"kubernetes.io/projected/68bc2246-25a1-4b4f-bdde-3686ba87ba9a-kube-api-access-spkbj\") pod \"68bc2246-25a1-4b4f-bdde-3686ba87ba9a\" (UID: \"68bc2246-25a1-4b4f-bdde-3686ba87ba9a\") " Jan 28 13:00:03 crc kubenswrapper[4685]: I0128 13:00:03.573793 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/68bc2246-25a1-4b4f-bdde-3686ba87ba9a-config-volume" (OuterVolumeSpecName: "config-volume") pod "68bc2246-25a1-4b4f-bdde-3686ba87ba9a" (UID: "68bc2246-25a1-4b4f-bdde-3686ba87ba9a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 13:00:03 crc kubenswrapper[4685]: I0128 13:00:03.583304 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68bc2246-25a1-4b4f-bdde-3686ba87ba9a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "68bc2246-25a1-4b4f-bdde-3686ba87ba9a" (UID: "68bc2246-25a1-4b4f-bdde-3686ba87ba9a"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 13:00:03 crc kubenswrapper[4685]: I0128 13:00:03.583854 4685 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68bc2246-25a1-4b4f-bdde-3686ba87ba9a-kube-api-access-spkbj" (OuterVolumeSpecName: "kube-api-access-spkbj") pod "68bc2246-25a1-4b4f-bdde-3686ba87ba9a" (UID: "68bc2246-25a1-4b4f-bdde-3686ba87ba9a"). InnerVolumeSpecName "kube-api-access-spkbj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 13:00:03 crc kubenswrapper[4685]: I0128 13:00:03.674417 4685 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/68bc2246-25a1-4b4f-bdde-3686ba87ba9a-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 13:00:03 crc kubenswrapper[4685]: I0128 13:00:03.674464 4685 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/68bc2246-25a1-4b4f-bdde-3686ba87ba9a-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 13:00:03 crc kubenswrapper[4685]: I0128 13:00:03.674475 4685 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-spkbj\" (UniqueName: \"kubernetes.io/projected/68bc2246-25a1-4b4f-bdde-3686ba87ba9a-kube-api-access-spkbj\") on node \"crc\" DevicePath \"\"" Jan 28 13:00:04 crc kubenswrapper[4685]: I0128 13:00:04.112311 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-f2ksz" event={"ID":"68bc2246-25a1-4b4f-bdde-3686ba87ba9a","Type":"ContainerDied","Data":"fa8ce3d2163f7fe9d12cd574666502858f34c2d9d0226a991ab684757f08bee1"} Jan 28 13:00:04 crc kubenswrapper[4685]: I0128 13:00:04.112368 4685 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fa8ce3d2163f7fe9d12cd574666502858f34c2d9d0226a991ab684757f08bee1" Jan 28 13:00:04 crc kubenswrapper[4685]: I0128 13:00:04.112434 4685 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493420-f2ksz" Jan 28 13:00:04 crc kubenswrapper[4685]: I0128 13:00:04.463591 4685 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp"] Jan 28 13:00:04 crc kubenswrapper[4685]: I0128 13:00:04.470215 4685 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493375-d96dp"] Jan 28 13:00:04 crc kubenswrapper[4685]: I0128 13:00:04.554592 4685 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="384b638e-e264-4492-8250-95540a24916c" path="/var/lib/kubelet/pods/384b638e-e264-4492-8250-95540a24916c/volumes" Jan 28 13:00:15 crc kubenswrapper[4685]: I0128 13:00:15.545502 4685 scope.go:117] "RemoveContainer" containerID="2641a0dab6f217bc7e61951ba9c7e3b3dd19fe7452d4002ac6c0710cf0137283" Jan 28 13:00:15 crc kubenswrapper[4685]: E0128 13:00:15.546264 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 13:00:29 crc kubenswrapper[4685]: I0128 13:00:29.546321 4685 scope.go:117] "RemoveContainer" containerID="2641a0dab6f217bc7e61951ba9c7e3b3dd19fe7452d4002ac6c0710cf0137283" Jan 28 13:00:29 crc kubenswrapper[4685]: E0128 13:00:29.547185 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 13:00:40 crc kubenswrapper[4685]: I0128 13:00:40.549919 4685 scope.go:117] "RemoveContainer" containerID="2641a0dab6f217bc7e61951ba9c7e3b3dd19fe7452d4002ac6c0710cf0137283" Jan 28 13:00:40 crc kubenswrapper[4685]: E0128 13:00:40.551218 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 13:00:52 crc kubenswrapper[4685]: I0128 13:00:52.161720 4685 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dwhmx"] Jan 28 13:00:52 crc kubenswrapper[4685]: E0128 13:00:52.162831 4685 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68bc2246-25a1-4b4f-bdde-3686ba87ba9a" containerName="collect-profiles" Jan 28 13:00:52 crc kubenswrapper[4685]: I0128 13:00:52.162848 4685 state_mem.go:107] "Deleted CPUSet assignment" podUID="68bc2246-25a1-4b4f-bdde-3686ba87ba9a" containerName="collect-profiles" Jan 28 13:00:52 crc kubenswrapper[4685]: I0128 13:00:52.163063 4685 memory_manager.go:354] "RemoveStaleState removing state" podUID="68bc2246-25a1-4b4f-bdde-3686ba87ba9a" containerName="collect-profiles" Jan 28 13:00:52 crc kubenswrapper[4685]: I0128 13:00:52.164382 4685 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dwhmx" Jan 28 13:00:52 crc kubenswrapper[4685]: I0128 13:00:52.181879 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwhmx"] Jan 28 13:00:52 crc kubenswrapper[4685]: I0128 13:00:52.202666 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1007e023-a6e6-4c36-900f-dce355c29d60-utilities\") pod \"redhat-marketplace-dwhmx\" (UID: \"1007e023-a6e6-4c36-900f-dce355c29d60\") " pod="openshift-marketplace/redhat-marketplace-dwhmx" Jan 28 13:00:52 crc kubenswrapper[4685]: I0128 13:00:52.202719 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqk2p\" (UniqueName: \"kubernetes.io/projected/1007e023-a6e6-4c36-900f-dce355c29d60-kube-api-access-xqk2p\") pod \"redhat-marketplace-dwhmx\" (UID: \"1007e023-a6e6-4c36-900f-dce355c29d60\") " pod="openshift-marketplace/redhat-marketplace-dwhmx" Jan 28 13:00:52 crc kubenswrapper[4685]: I0128 13:00:52.202802 4685 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1007e023-a6e6-4c36-900f-dce355c29d60-catalog-content\") pod \"redhat-marketplace-dwhmx\" (UID: \"1007e023-a6e6-4c36-900f-dce355c29d60\") " pod="openshift-marketplace/redhat-marketplace-dwhmx" Jan 28 13:00:52 crc kubenswrapper[4685]: I0128 13:00:52.323744 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1007e023-a6e6-4c36-900f-dce355c29d60-utilities\") pod \"redhat-marketplace-dwhmx\" (UID: \"1007e023-a6e6-4c36-900f-dce355c29d60\") " pod="openshift-marketplace/redhat-marketplace-dwhmx" Jan 28 
13:00:52 crc kubenswrapper[4685]: I0128 13:00:52.323837 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqk2p\" (UniqueName: \"kubernetes.io/projected/1007e023-a6e6-4c36-900f-dce355c29d60-kube-api-access-xqk2p\") pod \"redhat-marketplace-dwhmx\" (UID: \"1007e023-a6e6-4c36-900f-dce355c29d60\") " pod="openshift-marketplace/redhat-marketplace-dwhmx" Jan 28 13:00:52 crc kubenswrapper[4685]: I0128 13:00:52.324066 4685 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1007e023-a6e6-4c36-900f-dce355c29d60-catalog-content\") pod \"redhat-marketplace-dwhmx\" (UID: \"1007e023-a6e6-4c36-900f-dce355c29d60\") " pod="openshift-marketplace/redhat-marketplace-dwhmx" Jan 28 13:00:52 crc kubenswrapper[4685]: I0128 13:00:52.325068 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1007e023-a6e6-4c36-900f-dce355c29d60-catalog-content\") pod \"redhat-marketplace-dwhmx\" (UID: \"1007e023-a6e6-4c36-900f-dce355c29d60\") " pod="openshift-marketplace/redhat-marketplace-dwhmx" Jan 28 13:00:52 crc kubenswrapper[4685]: I0128 13:00:52.325235 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1007e023-a6e6-4c36-900f-dce355c29d60-utilities\") pod \"redhat-marketplace-dwhmx\" (UID: \"1007e023-a6e6-4c36-900f-dce355c29d60\") " pod="openshift-marketplace/redhat-marketplace-dwhmx" Jan 28 13:00:52 crc kubenswrapper[4685]: I0128 13:00:52.348716 4685 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqk2p\" (UniqueName: \"kubernetes.io/projected/1007e023-a6e6-4c36-900f-dce355c29d60-kube-api-access-xqk2p\") pod \"redhat-marketplace-dwhmx\" (UID: \"1007e023-a6e6-4c36-900f-dce355c29d60\") " pod="openshift-marketplace/redhat-marketplace-dwhmx" Jan 28 13:00:52 crc kubenswrapper[4685]: I0128 13:00:52.489335 4685 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dwhmx" Jan 28 13:00:52 crc kubenswrapper[4685]: I0128 13:00:52.996360 4685 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwhmx"] Jan 28 13:00:53 crc kubenswrapper[4685]: I0128 13:00:53.530015 4685 generic.go:334] "Generic (PLEG): container finished" podID="1007e023-a6e6-4c36-900f-dce355c29d60" containerID="914038c7f11f8c18eb41a6eb5c79b9924c2b0a8b25ea449c989e3d9d580fc40e" exitCode=0 Jan 28 13:00:53 crc kubenswrapper[4685]: I0128 13:00:53.530104 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwhmx" event={"ID":"1007e023-a6e6-4c36-900f-dce355c29d60","Type":"ContainerDied","Data":"914038c7f11f8c18eb41a6eb5c79b9924c2b0a8b25ea449c989e3d9d580fc40e"} Jan 28 13:00:53 crc kubenswrapper[4685]: I0128 13:00:53.530366 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwhmx" event={"ID":"1007e023-a6e6-4c36-900f-dce355c29d60","Type":"ContainerStarted","Data":"7251af0dc016b660245bd4cf022b1c4355a1a9ddcaab86c3c94fb3ed99618368"} Jan 28 13:00:53 crc kubenswrapper[4685]: I0128 13:00:53.531608 4685 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 13:00:53 crc kubenswrapper[4685]: I0128 13:00:53.546089 4685 scope.go:117] "RemoveContainer" containerID="2641a0dab6f217bc7e61951ba9c7e3b3dd19fe7452d4002ac6c0710cf0137283" Jan 28 13:00:53 crc kubenswrapper[4685]: E0128 13:00:53.546400 4685 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h5wpv_openshift-machine-config-operator(c708b01f-11f7-4c21-86c4-92ac3c7e9cb1)\"" pod="openshift-machine-config-operator/machine-config-daemon-h5wpv" podUID="c708b01f-11f7-4c21-86c4-92ac3c7e9cb1" Jan 28 13:00:54 crc kubenswrapper[4685]: I0128 13:00:54.597911 4685 scope.go:117] "RemoveContainer" containerID="baaf64ffda2228af339dc3dba36961ff66977e738ed6a60de82f094e83c09eab" Jan 28 13:00:55 crc kubenswrapper[4685]: I0128 13:00:55.557846 4685 generic.go:334] "Generic (PLEG): container finished" podID="1007e023-a6e6-4c36-900f-dce355c29d60" containerID="69bdafe60c0640b084e2f86a6cd07e43b00e7116e121c8d60b0a227038bdb6fc" exitCode=0 Jan 28 13:00:55 crc kubenswrapper[4685]: I0128 13:00:55.557891 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwhmx" event={"ID":"1007e023-a6e6-4c36-900f-dce355c29d60","Type":"ContainerDied","Data":"69bdafe60c0640b084e2f86a6cd07e43b00e7116e121c8d60b0a227038bdb6fc"} Jan 28 13:00:56 crc kubenswrapper[4685]: I0128 13:00:56.583637 4685 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwhmx" event={"ID":"1007e023-a6e6-4c36-900f-dce355c29d60","Type":"ContainerStarted","Data":"cdf3404df1bf0f0800a23482a119cce53a094eed5ec30b62b487ba7cabd8c256"} Jan 28 13:00:57 crc kubenswrapper[4685]: I0128 13:00:57.609834 4685 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dwhmx" podStartSLOduration=2.7637011830000002 podStartE2EDuration="5.60981419s" podCreationTimestamp="2026-01-28 13:00:52 +0000 UTC" firstStartedPulling="2026-01-28 13:00:53.531320473 +0000 UTC m=+2404.618734308" lastFinishedPulling="2026-01-28 13:00:56.37743348 +0000 UTC m=+2407.464847315" 
observedRunningTime="2026-01-28 13:00:57.607099553 +0000 UTC m=+2408.694513408" watchObservedRunningTime="2026-01-28 13:00:57.60981419 +0000 UTC m=+2408.697228025" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515136404224024446 0ustar coreroot  Om77'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015136404225017364 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015136377150016515 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015136377151015466 5ustar corecore